"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase ):
a__: Optional[Any] = DistilBertTokenizer
a__: Optional[Any] = DistilBertTokenizerFast
a__: Optional[Any] = True
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowerCamelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
"""Power iteration for the dominant eigenpair of a symmetric / Hermitian matrix."""
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure the complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to the next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual because we know the vector is already normalized).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
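
# A note on convergence (added for context): power iteration converges linearly at a
# rate governed by |lambda_2 / lambda_1|, the ratio of the two largest-magnitude
# eigenvalues, so it is slow when the dominant eigenvalue is not well separated. The
# relative-change test on the Rayleigh quotient above is a standard stopping criterion.
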
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using the built-in
        # eigh (used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # The last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # The last column of this matrix is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check that our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Compare eigenvectors element-wise in absolute value,
        # as they are only unique up to a sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
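
# Context: make_duplicate_clusters groups files whose MinHash estimate of Jaccard
# similarity reaches the given threshold (0.85 below). The two "a "-repeated files in
# the fixture are near-duplicates of each other, so they are expected to form a
# single cluster, while the "b "-file stays on its own.
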
class MinHashDeduplicationTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        # The obfuscated source elided this expected value; True matches the upstream test.
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
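
# Illustration (hypothetical pytest summary string, not from a real run):
#   handle_test_results("1 failed, 123 passed in 36.05s =")
# returns failed=1, success=123 and picks "36.05s" as the time spent, since the
# trailing "=" marks the short, sign-delimited form of the output.
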
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by `post`; required before `post_reply` may be called.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time_str in time_spent:
            time_parts = time_str.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a list of blocks here, so no `json.loads` round-trip is needed.
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
"""Lazy import structure for the MLuke tokenizer."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""MEGATRON_BERT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (0 for invalid input, matching the original behavior)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]
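
# For reference: this file solves Project Euler problem 25. With the indexing used
# here, solution(1000) returns 4782, the index of the first Fibonacci term to
# contain 1000 digits.
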
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            # Fill a character buffer from the streamed dataset, then tokenize it in one pass.
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Slice the concatenated token stream into fixed-length sequences.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
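
# Note: `input_characters` is a heuristic buffer budget. Assuming roughly
# `chars_per_token` (3.6) characters per token of code, buffering
# seq_length * chars_per_token * num_of_sequences characters should yield about
# `num_of_sequences` packed sequences of `seq_length` tokens per tokenization pass.
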
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
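
# Perplexity is the exponential of the mean cross-entropy loss: ppl = exp(mean_loss).
# The scalar batch loss is repeated to the batch size before `accelerator.gather` so
# that losses from all processes can be concatenated even when batches are uneven.
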
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
"""Digit-cancelling fractions (Project Euler problem 33)."""
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1

        num += 1
        den = 10
    return solutions
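
# For two-digit terms this finds exactly the four non-trivial digit-cancelling
# fractions of Project Euler problem 33: 16/64, 19/95, 26/65 and 49/98. Their
# product is 1/100, so solution() below returns 100, the denominator in lowest terms.
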
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if the given filesystem is not the local one."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    # Clear fsspec.asyn's references to its event loop and IO thread; without this,
    # async filesystems can hang after a fork (e.g. in DataLoader workers).
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n using a sieve over odd numbers."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
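
# Sieve note: for each odd i up to sqrt(n) (prime or not), the multiples 2i, 3i, ...
# are struck out; the collection pass then takes 2 plus the odd surviving indices,
# so the even entries of `is_prime` are written but never read.
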
def solution(limit: int = 999_966_663_333) -> int:
    # This appears to be Project Euler problem 234 ("Semidivisible numbers"); the
    # default limit matches that problem's statement.
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
"""Testing suite for the PyTorch LayoutLMv3 model."""
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _a ,_a ,unittest.TestCase):
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self : List[str] , __A : Union[str, Any] , __A : Optional[Any] , __A : Optional[int] , __A : List[str] , __A : Dict ) ->Dict:
"""simple docstring"""
return True
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a__ :int = LayoutLMvaModelTester(self )
a__ :Union[str, Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 395 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 415 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 415 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 287 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k: str, patterns: list) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
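# Illustrative example (added; not part of the original script): using the pattern
# lists defined above, rename_state_dict_key maps the TF decoder key
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
# through successive str.replace calls to
#   "model.decoder.layers.0.self_attn.q_proj.weight".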
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    config = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(config)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 692 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 366 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _snake_case :
snake_case__ = None
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = None
snake_case__ = None
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = True
snake_case__ = None
snake_case__ = 1
snake_case__ = None
snake_case__ = False
snake_case__ = None
snake_case__ = None
def lowerCamelCase__ ( self : Any ):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 366 | 1 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    '''simple docstring'''
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
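# Sanity check (added for illustration): for a denominator limit of 8, the fraction
# immediately to the left of 3/7 is 2/5, so the returned numerator is 2.
assert solution(3, 7, 8) == 2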
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 179 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']

    mapping = {
        '''self_attn''': '''ngram_self_attn''',
        '''cross_attn''': '''encoder_attn''',
        '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
        '''feed_forward_layer_norm''': '''final_layer_norm''',
        '''feed_forward''': '''''',
        '''intermediate''': '''fc1''',
        '''output''': '''fc2''',
        '''key_proj''': '''k_proj''',
        '''query_proj''': '''q_proj''',
        '''value_proj''': '''v_proj''',
        '''word_embeddings''': '''embed_tokens''',
        '''embeddings_layer_norm''': '''emb_layer_norm''',
        '''relative_pos_embeddings''': '''relative_linear''',
        '''ngram_embeddings''': '''ngram_input_embed''',
        '''position_embeddings''': '''embed_positions''',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('''.''')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'{attribute} is initialized.')
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'{attribute} is initialized')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, '''in_proj_weight'''):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f'{old_model} does not have {old_attribute}')
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f'{key} was not correctly initialized!')

    print(f'Saving model to {pytorch_dump_folder_path}')
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 179 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """simple docstring"""
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCAmelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 81 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """simple docstring"""
    sample: torch.FloatTensor
class Encoder(nn.Module):
    """simple docstring"""
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    """simple docstring"""
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """simple docstring"""
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
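# Minimal usage sketch (illustrative, not part of the original module): quantize a
# random (batch, channel, height, width) feature map against a small codebook.
#
#   vq = VectorQuantizer(n_e=12, vq_embed_dim=4, beta=0.25)
#   z = torch.randn(1, 4, 8, 8)
#   z_q, loss, (_, _, indices) = vq(z)  # z_q keeps z's shape; indices select codebook entries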
class DiagonalGaussianDistribution(object):
    """simple docstring"""
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
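# Minimal sketch (illustrative): `parameters` packs mean and logvar along dim 1, so a
# (B, 2C, H, W) tensor yields (B, C, H, W) samples via the reparameterization trick.
#
#   dist = DiagonalGaussianDistribution(torch.zeros(1, 8, 4, 4))
#   x = dist.sample()  # mean + std * eps, differentiable w.r.t. the parameters
#   kl = dist.kl()     # KL against a standard normal, summed over C, H, W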
| 81 | 1 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_00_00_00:
            break

    return n
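# Worked example (added for illustration): with min_block_length=3, the recurrence
# above yields fill_count_functions[7] == 17, matching the known seventeen ways of
# filling a row of length seven with red blocks of minimum length three.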
if __name__ == "__main__":
print(F"""{solution() = }""")
| 558 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 415 | 0 |
'''simple docstring'''
from PIL import Image
def change_brightness(img, level):
    '''simple docstring'''
    def brightness(c) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
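# Worked example (added for illustration): brightness maps c -> 128 + level + (c - 128),
# i.e. a uniform shift by `level`; with level=100 a pixel value of 50 becomes 150.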
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""
    def __init__(self, list_of_points) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
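    # Worked example (added for illustration): for a degree-1 curve at t = 0.5, the
    # basis values are [comb(1, 0) * 0.5, comb(1, 1) * 0.5] == [0.5, 0.5], which sum to 1.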
    def bezier_curve_function(self, t) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color='''blue''', label='''Curve of Degree ''' + str(self.degree))
        plt.scatter(x, y, color='''red''', label='''Control Points''')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 34 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
| 300 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
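# A few renames convert_state_dict produces (the key names are illustrative,
# not taken from a real checkpoint):
#   emb.weight              -> rwkv.embeddings.weight
#   blocks.0.ln0.weight     -> rwkv.blocks.0.pre_ln.weight
#   blocks.3.att.time_mix_k -> rwkv.blocks.3.attention.time_mix_key
#   head.weight             -> head.weight  (the LM head keeps its name)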
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 710 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
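# Illustrative invocation (script and path names are made up):
#   python convert_fsmt_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en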
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 198 | 0 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
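# For reference: hamming(5) -> [1, 2, 3, 4, 5]; the series continues 6, 8, 9, 10, 12, ...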
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
print("-----------------------------------------------------")
print(F"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
| 127 |
| 127 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
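# Parsing sketch for vocab lines (the tokens are illustrative):
#   "こんにちは"  -> one entry ["こんにちは"] with a single id
#   "ABC,abc"     -> ["ABC", "abc"]; both surface forms map to the same id in `vocab`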
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.raw_vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """simple docstring"""

    def __init__(self, vocab, ids_to_tokens, emoji):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        """simple docstring"""
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        """simple docstring"""
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
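    # Illustrative id handling above (tokens are examples): "<|byte65|>" decodes
    # to "A" via the UTF-8 byte buffer, "<SP>" to " ", "<TAB>" to "\t".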
| 35 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list, weight: list, capacity: float) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
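# Worked example (illustrative numbers):
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> items 0 and 1 are taken whole, 2/3 of item 2 is taken: (240.0, [1, 1, 0.666...])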
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """simple docstring"""

    def __init__(self, key: int = 0):
        """simple docstring"""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """simple docstring"""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """simple docstring"""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """simple docstring"""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """simple docstring"""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """simple docstring"""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """simple docstring"""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 453 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
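# Minimal usage sketch (relies only on the defaults declared above):
#   config = BeitConfig()        # BEiT-base style defaults
#   config.num_hidden_layers     # -> 12
#   config.out_indices           # -> [3, 5, 7, 11]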
class BeitOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
| 453 | 1 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
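# For reference, each DPR record is shaped roughly like this (illustrative values;
# only the fields read above are guaranteed):
#   {"question": "...", "positive_ctxs": [{"title": "..."}], ...}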
if __name__ == "__main__":
main()
| 616 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 616 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        """simple docstring"""
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """simple docstring"""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """simple docstring"""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """simple docstring"""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        """simple docstring"""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """simple docstring"""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """simple docstring"""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics) -> str:
        """simple docstring"""
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        """simple docstring"""
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """simple docstring"""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """simple docstring"""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 254 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 254 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    # NOTE: the "vit." key prefix below is an assumption made while repairing this
    # helper; it mirrors the target naming used by convert_state_dict further down.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
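# A few renames rename_key produces (the key names are illustrative):
#   backbone.cls_token                 -> vit.embeddings.cls_token
#   backbone.blocks.0.attn.proj.weight -> vit.encoder.layer.0.attention.output.dense.weight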
def convert_state_dict(orig_state_dict: dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_logits , expected_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_logits = torch.tensor(
            [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        expected_boxes = torch.tensor(
            [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_logits = torch.tensor(
            [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        expected_boxes = torch.tensor(
            [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_logits = torch.tensor(
            [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        expected_boxes = torch.tensor(
            [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
    elif yolos_name == "yolos_s_dWr":
        expected_logits = torch.tensor(
            [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        expected_boxes = torch.tensor(
            [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
    elif yolos_name == "yolos_base":
        expected_logits = torch.tensor(
            [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        expected_boxes = torch.tensor(
            [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
    else:
        raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',"
" \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
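# Example invocation (hypothetical script and file names; the original YOLOS
# checkpoint has to be downloaded separately):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small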
| 708 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase ( a ) -> bool:
    '''
    Check whether all elements of the input list are distinct.

    >>> UpperCamelCase([1, 2, 3])
    True
    >>> UpperCamelCase([1, 2, 2])
    False
    '''
return len(set(a ) ) == len(a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main( ) -> None:
    """simple docstring"""
    print("Making key files..." )
    make_key_files("rsa" , 1024 )
    print("Key files generation successful." )
def generate_key( key_size : int ) -> tuple[tuple[int, int], tuple[int, int]]:
    """simple docstring"""
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files( name : str , key_size : int ) -> None:
    """simple docstring"""
    if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
        print("\nWARNING:" )
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f"\nWriting public key to file {name}_pubkey.txt..." )
    with open(f"{name}_pubkey.txt" , "w" ) as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}" )
    print(f"Writing private key to file {name}_privkey.txt..." )
    with open(f"{name}_privkey.txt" , "w" ) as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
main()
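# Round-trip sketch (not part of this module): textbook RSA with the key pair
# produced above, using plain modular exponentiation.
#   (n, e), (_n, d) = generate_key(1024)
#   message = 42
#   ciphertext = pow(message, e, n)
#   assert pow(ciphertext, d, n) == message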
| 77 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
A = {"""vocab_file""": """spiece.model"""}
A = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
A = {"""bert_for_seq_generation""": 512}
class a__ ( PreTrainedTokenizer ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = []
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCamelCase_)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def a_ ( self : Union[str, Any]):
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.__dict__.copy()
__UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self : Any , UpperCamelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_)
return token
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_) + token
__UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(UpperCamelCase_)
out_string += self.sp_model.decode(UpperCamelCase_)
return out_string.strip()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase_ , "wb") as fi:
__UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_)
return (out_vocab_file,)
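# Minimal usage sketch (requires network access to the checkpoint referenced in
# the vocab map above; model id taken from that URL):
#   tokenizer = a__.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   ids = tokenizer("Hello world")["input_ids"]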
| 77 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowercase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_lowercase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_lowercase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_lowercase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_lowercase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""" )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '''\n''' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["""task_id"""]].append((result["""completion_id"""], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['''passed'''] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {F'pass@{k}': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k (num_samples , num_correct , k ):
    def estimator(n : int , c : int , k : int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
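# Worked example of the unbiased estimator above (illustrative numbers): with
# n=5 samples per task and c=2 passing, pass@1 = 1 - (1 - 1/4)(1 - 1/5) = 0.4,
# which equals c/n as expected:
#   >>> estimate_pass_at_k([5], [2], 1)
#   array([0.4])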
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_lowercase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
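# Sketch of what the lazy structure above buys (illustrative; exact import paths
# assumed): importing the package is cheap, and a submodule such as `features`
# is only loaded on first attribute access:
#   from transformers import onnx   # no heavy submodule imported yet
#   onnx.FeaturesManager            # first access triggers the real import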
| 162 | 0 |
from math import ceil, sqrt
def solution( limit : int = 1_000_000 ) -> int:
    '''simple docstring'''
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
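# Sanity check on a tiny limit: with at most 8 tiles, only the 3x3 lamina with a
# 1x1 hole (exactly 8 tiles) fits, so solution(8) == 1.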
| 80 |
def encrypt( input_string : str , key : int ) -> str:
    '''simple docstring'''
    temp_grid = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["""""".join(row ) for row in temp_grid]
    output_string = """""".join(grid )
    return output_string
def decrypt( input_string : str , key : int ) -> str:
    '''simple docstring'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("""*""" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = """"""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce( input_string : str ) -> dict[int, str]:
    '''simple docstring'''
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
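# Round-trip sketch using the classic 3-rail example (key chosen for illustration):
#   >>> encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
#   'WECRLTEERDSOEEFEAOCAIVDEN'
#   >>> decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3)
#   'WEAREDISCOVEREDFLEEATONCE'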
| 80 | 1 |
from math import factorial
def solution( n : int = 1_0_0 ) -> int:
    '''simple docstring'''
    return sum(int(x ) for x in str(factorial(n ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
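# Sanity check: the digit sum of 100! is 648 (the well-known Project Euler
# problem 20 answer), so solution() returns 648 for the default input.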
| 582 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    '''simple docstring'''
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    '''simple docstring'''
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha , x , y , max_iterations=7_0_0_0_0 ):
    '''simple docstring'''
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(f'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def predict_prob( x ):
    '''simple docstring'''
    return sigmoid_function(
        np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
xa_min, xa_max = (x[:, 0].min(), x[:, 0].max())
xb_min, xb_max = (x[:, 1].min(), x[:, 1].max())
xxa, xxb = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
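# Worked check of the gradient update above (a hand-computed sketch): for a
# single example x = [1, 0] with label y = 1 and theta = [0, 0], h = sigmoid(0)
# = 0.5, gradient = x * (h - y) = [-0.5, 0], so theta - alpha * gradient =
# [0.05, 0] with alpha = 0.1, nudging the prediction toward the positive class.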
| 582 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCamelCase =StableDiffusionControlNetImgaImgPipeline
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase =IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__UpperCamelCase =IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self : Tuple , snake_case__ : Dict , snake_case__ : Tuple=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=lowerCamelCase_ , device=torch.device(lowerCamelCase_ ) , )
SCREAMING_SNAKE_CASE = floats_tensor(control_image.shape , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class UpperCamelCase ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
__UpperCamelCase =StableDiffusionControlNetImgaImgPipeline
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase =frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(lowerCamelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(lowerCamelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=lowerCamelCase_ , device=torch.device(lowerCamelCase_ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=lowerCamelCase_ , device=torch.device(lowerCamelCase_ ) , ),
]
SCREAMING_SNAKE_CASE = floats_tensor(control_image[0].shape , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = 10.0
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : str ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCamelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ , controlnet=lowerCamelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE = 'evil space-punk bird'
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_1_2, 5_1_2) )
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_1_2, 5_1_2) )
SCREAMING_SNAKE_CASE = pipe(
lowerCamelCase_ , lowerCamelCase_ , control_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , num_inference_steps=5_0 , strength=0.6 , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
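# These test classes are meant to be collected by pytest from a diffusers
# checkout, e.g. (file path assumed):
#   pytest tests/pipelines/controlnet/test_controlnet_img2img.py -x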
| 439 |
def bead_sort( sequence : list ) -> list:
    """simple docstring"""
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
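# Each outer pass lets every "bead" fall by at most one rod, so the double loop
# above is O(n**2); e.g. bead_sort([9, 1]) resolves the single inversion in the
# first pass and returns [1, 9].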
| 147 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : List[str] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (PreTrainedTokenizer ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
UpperCAmelCase_ : Dict = int(__magic_name__ )
return token_to_idx
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
if os.path.isdir(__magic_name__ ):
UpperCAmelCase_ : Any = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase_ : Dict = token_index
writer.write(token + '''\n''' )
index += 1
UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
with open(__magic_name__ , '''wb''' ) as fi:
UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (vocab_file,)
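# Usage sketch mirroring the upstream ErnieM tokenizer API (hypothetical local
# paths; keyword names assumed):
#   tok = __a(sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt")
#   ids = tok("He said: hello!")["input_ids"]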
| 644 | 0 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos( position : tuple[int, int] , n : int ) -> list[tuple[int, int]]:
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions


def is_complete( board : list[list[int]] ) -> bool:
    return not any(elem == 0 for row in board for elem in row )


def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ) -> bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False


def open_knight_tour( n : int ) -> list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
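# Example: open_knight_tour(5) returns a 5x5 board numbered 1..25 (5 is the
# smallest size above 1 that admits an open tour), while open_knight_tour(4)
# raises ValueError because no 4x4 tour exists.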
| 649 |
'''simple docstring'''
def and_gate( input_a : int , input_b : int ) -> int:
    return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
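# Equivalent one-liner for 0/1 inputs using Python's bitwise AND (a sketch):
#   def and_gate_bitwise(input_a: int, input_b: int) -> int:
#       return input_a & input_b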
| 649 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader ( ABC ):
    '''simple docstring'''

    def __init__( self , path_or_paths : Optional[NestedDataStructureLike[PathLike]] = None , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else """train"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream ( ABC ):
    '''simple docstring'''

    def __init__( self , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read( self ) -> Union[Dataset, IterableDataset]:
        pass
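# Hypothetical subclass sketch: the concrete readers in `datasets` (csv, json,
# ...) fill in `read` roughly like this (builder plumbing omitted):
#   class CsvDatasetReader(AbstractDatasetReader):
#       def read(self):
#           builder = ...  # build a Csv builder from self.path_or_paths / self.kwargs
#           if self.streaming:
#               return builder.as_streaming_dataset(split=self.split)
#           builder.download_and_prepare()
#           return builder.as_dataset(split=self.split)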
| 716 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ="https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase ={
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key( key ):
"""simple docstring"""
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 1_0:
A = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 1_0:
A = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 1_0:
A = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 1_0:
A = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
A = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
A = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
A = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
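

# Hedged, self-contained illustration of the suffix rewriting above; the toy
# key below is hypothetical but long enough to pass the len(...) > 10 guard.
def _demo_replace_suffix(key: str) -> str:
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        return key.replace(".model.1.bias", ".conv1d_1.bias")
    return key


assert (
    _demo_replace_suffix("vqvae.encoders.0.level_blocks.0.model.0.0.model.1.bias")
    == "vqvae.encoders.0.level_blocks.0.model.0.0.conv1d_1.bias"
)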
def _A ( _a : Union[str, Any] , _a : Union[str, Any] , _a : Union[str, Any] , _a : List[Any] ):
"""simple docstring"""
A = {}
import re
A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
A = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
A = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_a ):
A = re_encoder_block_conv_in.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] )
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
A = re_encoder_block_conv_in.sub(_a , _a )
elif re_encoder_block_resnet.fullmatch(_a ):
A = re_encoder_block_resnet.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] )
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_encoder_block_resnet.sub(_a , _a )
elif re_encoder_block_proj_out.fullmatch(_a ):
A = re_encoder_block_proj_out.match(_a )
A = regex_match.groups()
A = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
A = re_encoder_block_proj_out.sub(_a , _a )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_a ):
A = re_decoder_block_conv_out.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] ) - 2
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
A = re_decoder_block_conv_out.sub(_a , _a )
elif re_decoder_block_resnet.fullmatch(_a ):
A = re_decoder_block_resnet.match(_a )
A = regex_match.groups()
A = int(groups[2] ) * 2 + int(groups[3] ) - 2
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_decoder_block_resnet.sub(_a , _a )
elif re_decoder_block_proj_in.fullmatch(_a ):
A = re_decoder_block_proj_in.match(_a )
A = regex_match.groups()
A = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
A = re_decoder_block_proj_in.sub(_a , _a )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_a ):
A = re_prior_cond_conv_out.match(_a )
A = regex_match.groups()
A = int(groups[1] ) * 2 + int(groups[2] ) - 2
A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
A = re_prior_cond_conv_out.sub(_a , _a )
elif re_prior_cond_resnet.fullmatch(_a ):
A = re_prior_cond_resnet.match(_a )
A = regex_match.groups()
A = int(groups[1] ) * 2 + int(groups[2] ) - 2
A = {"""1""": 1, """3""": 2}[groups[-2]]
A = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
A = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
A = prefix + resnet_block
A = re_prior_cond_resnet.sub(_a , _a )
elif re_prior_cond_proj_in.fullmatch(_a ):
A = re_prior_cond_proj_in.match(_a )
A = regex_match.groups()
A = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
A = re_prior_cond_proj_in.sub(_a , _a )
# keep original key
else:
A = original_key
A = replace_key(_a )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
A = original_key
A = original_key
A = value
return new_dict
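

# Hedged, standalone demo of the regex renumbering used above: the two layer
# indices (\d*).(\d) collapse into a single downsample_block index.
import re as _re_demo

_demo_pat = _re_demo.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_demo_groups = _demo_pat.match("encoders.0.level_blocks.1.model.2.3.weight").groups()
_demo_block_index = int(_demo_groups[2]) * 2 + int(_demo_groups[3])  # 2 * 2 + 3 == 7
assert (
    f"encoders.{_demo_groups[0]}.level_blocks.{_demo_groups[1]}.downsample_block.{_demo_block_index}.{_demo_groups[-1]}"
    == "encoders.0.level_blocks.1.downsample_block.7.weight"
)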
@torch.no_grad()
def _A ( _a : Optional[Any]=None , _a : str=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
            r = requests.get(f'{PREFIX}{file}' , allow_redirects=True )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=_a )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
A = MODEL_MAPPING[model_name.split("""/""" )[-1]]
A = JukeboxConfig.from_pretrained(_a )
A = JukeboxModel(_a )
    weight_dict = []
A = {}
for i, dict_name in enumerate(_a ):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model']
A = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
A = old_dic[k]
elif k.endswith(""".w""" ):
A = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A = old_dic[k]
else:
A = old_dic[k]
A = """vqvae""" if i == 0 else f'priors.{3 - i}'
A = fix_jukebox_keys(_a , model.state_dict() , _a , _a )
weight_dict.append(_a )
A = weight_dict.pop(0 )
model.vqvae.load_state_dict(_a )
for i in range(len(_a ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_a ).mkdir(exist_ok=_a )
with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
json.dump(_a , _a )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_a )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase =parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
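
# Hedged usage sketch (script filename and output folder are illustrative;
# the flags match the argparse definitions above):
#     python convert_jukebox.py --model_name jukebox-1b-lyrics \
#         --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted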
| 255 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_a : int = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ["DPTFeatureExtractor"]
_a : List[str] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
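

# Hedged, minimal sketch of what the _LazyModule indirection above buys:
# attribute access triggers the real import, so importing the package stays
# cheap. This toy version is illustrative, not the real implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)


_lazy_math = _TinyLazyModule("lazy_math", {"math": ["sqrt"]})
assert _lazy_math.sqrt(9.0) == 3.0  # `math` is imported only at this access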
| 56 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case_ :
'''simple docstring'''
def __init__( self, A_, A_=13, A_=[30, 30], A_=2, A_=3, A_=True, A_=True, A_=32, A_=5, A_=4, A_=37, A_="gelu", A_=0.1, A_=0.1, A_=10, A_=0.02, A_=3, A_=None, A_=8, A_=10, ) -> List[str]:
UpperCAmelCase__ =parent
UpperCAmelCase__ =batch_size
UpperCAmelCase__ =image_size
UpperCAmelCase__ =patch_size
UpperCAmelCase__ =num_channels
UpperCAmelCase__ =is_training
UpperCAmelCase__ =use_labels
UpperCAmelCase__ =hidden_size
UpperCAmelCase__ =num_hidden_layers
UpperCAmelCase__ =num_attention_heads
UpperCAmelCase__ =intermediate_size
UpperCAmelCase__ =hidden_act
UpperCAmelCase__ =hidden_dropout_prob
UpperCAmelCase__ =attention_probs_dropout_prob
UpperCAmelCase__ =type_sequence_label_size
UpperCAmelCase__ =initializer_range
UpperCAmelCase__ =num_labels
UpperCAmelCase__ =scope
UpperCAmelCase__ =n_targets
UpperCAmelCase__ =num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase__ =(image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase__ =num_patches + 1 + self.num_detection_tokens
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase__ =None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase__ =[]
for i in range(self.batch_size ):
UpperCAmelCase__ ={}
UpperCAmelCase__ =torch.randint(
high=self.num_labels, size=(self.n_targets,), device=A_ )
UpperCAmelCase__ =torch.rand(self.n_targets, 4, device=A_ )
labels.append(A_ )
UpperCAmelCase__ =self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> Dict:
return YolosConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=A_, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Tuple:
UpperCAmelCase__ =YolosModel(config=A_ )
model.to(A_ )
model.eval()
UpperCAmelCase__ =model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Union[str, Any]:
UpperCAmelCase__ =YolosForObjectDetection(A_ )
model.to(A_ )
model.eval()
UpperCAmelCase__ =model(pixel_values=A_ )
UpperCAmelCase__ =model(A_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase__ =model(pixel_values=A_, labels=A_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase__ =self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =config_and_inputs
UpperCAmelCase__ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( a, a, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__UpperCamelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def __UpperCAmelCase ( self, A_, A_, A_=False ) -> Dict:
UpperCAmelCase__ =super()._prepare_for_class(A_, A_, return_labels=A_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase__ =[]
for i in range(self.model_tester.batch_size ):
UpperCAmelCase__ ={}
UpperCAmelCase__ =torch.ones(
size=(self.model_tester.n_targets,), device=A_, dtype=torch.long )
UpperCAmelCase__ =torch.ones(
self.model_tester.n_targets, 4, device=A_, dtype=torch.float )
labels.append(A_ )
UpperCAmelCase__ =labels
return inputs_dict
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase__ =YolosModelTester(self )
UpperCAmelCase__ =ConfigTester(self, config_class=A_, has_text_modality=A_, hidden_size=37 )
def __UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> Union[str, Any]:
# YOLOS does not use inputs_embeds
pass
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_, nn.Linear ) )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(A_ )
UpperCAmelCase__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ =[*signature.parameters.keys()]
UpperCAmelCase__ =["pixel_values"]
self.assertListEqual(arg_names[:1], A_ )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ =True
# in YOLOS, the seq_len is different
UpperCAmelCase__ =self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase__ =True
UpperCAmelCase__ =False
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(A_ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(A_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
UpperCAmelCase__ =len(A_ )
# Check attention is always last and order is fine
UpperCAmelCase__ =True
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =1
self.assertEqual(out_len + added_hidden_states, len(A_ ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(A_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def __UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(A_, A_, A_ ):
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =outputs.hidden_states
UpperCAmelCase__ =getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ), A_ )
# YOLOS has a different seq_length
UpperCAmelCase__ =self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =True
check_hidden_states_output(A_, A_, A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ =True
check_hidden_states_output(A_, A_, A_ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*A_ )
@slow
def __UpperCAmelCase ( self ) -> Any:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ =YolosModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> Any:
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase__ =YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(A_ )
UpperCAmelCase__ =self.default_image_processor
UpperCAmelCase__ =prepare_img()
UpperCAmelCase__ =image_processor(images=A_, return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ =model(inputs.pixel_values )
# verify outputs
UpperCAmelCase__ =torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape, A_ )
UpperCAmelCase__ =torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]], device=A_, )
UpperCAmelCase__ =torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]], device=A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], A_, atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], A_, atol=1E-4 ) )
# verify postprocessing
UpperCAmelCase__ =image_processor.post_process_object_detection(
A_, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
UpperCAmelCase__ =torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(A_ )
UpperCAmelCase__ =[75, 75, 17, 63, 17]
UpperCAmelCase__ =torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(A_ )
self.assertEqual(len(results["scores"] ), 5 )
self.assertTrue(torch.allclose(results["scores"], A_, atol=1E-4 ) )
self.assertSequenceEqual(results["labels"].tolist(), A_ )
self.assertTrue(torch.allclose(results["boxes"][0, :], A_ ) )
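

# Hedged arithmetic note on the sequence length used throughout the tests,
# with the tester defaults above: seq_len = num_patches + 1 (CLS token)
# + num_detection_tokens.
_image_size, _patch_size, _num_detection_tokens = (30, 30), 2, 10
_num_patches = (_image_size[1] // _patch_size) * (_image_size[0] // _patch_size)
assert _num_patches + 1 + _num_detection_tokens == 15 * 15 + 1 + 10 == 236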
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : int = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : str , **_lowercase : Union[str, Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(_lowercase )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' , self.tpu_name )
A = kwargs.pop('device_idx' , self.device_idx )
A = kwargs.pop('eager_mode' , self.eager_mode )
A = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**_lowercase )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={"""help""": """Name of TPU"""} , )
lowerCAmelCase = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
    lowerCAmelCase = field(default=UpperCAmelCase_ , metadata={"""help""": """Benchmark models in eager mode."""} )
lowerCAmelCase = field(
default=UpperCAmelCase_ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def __a ( self : Optional[Any] ):
requires_backends(self , ['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def __a ( self : Dict ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
A = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def __a ( self : List[Any] ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def __a ( self : Optional[Any] ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def __a ( self : str ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def __a ( self : Any ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __a ( self : Dict ):
return self.n_gpu > 0
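

# Hedged, standalone demo of the deprecated-flag inversion done in __init__
# above: a legacy `no_xxx=True` kwarg becomes `xxx=False`.
def _flip_deprecated_args(kwargs: dict, deprecated: list) -> dict:
    out = dict(kwargs)
    for arg in deprecated:
        if arg in out:
            out[arg[3:]] = not out.pop(arg)  # strip the "no_" prefix, negate
    return out


assert _flip_deprecated_args({"no_cuda": True, "batch_size": 8}, ["no_cuda"]) == {
    "batch_size": 8,
    "cuda": False,
}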
| 91 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase__ ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=4 , ) -> str:
_lowerCamelCase : str = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : Any = use_attention_mask
_lowerCamelCase : Optional[int] = use_token_type_ids
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : List[str] = num_choices
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_lowerCamelCase : List[str] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[str] = True
_lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase__ ( A_ ,unittest.TestCase ):
__UpperCAmelCase = True
__UpperCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : Tuple = FlaxRobertaModelTester(self)
@slow
def UpperCamelCase_ ( self) -> Dict:
for model_class_name in self.all_model_classes:
_lowerCamelCase : Any = model_class_name.from_pretrained("""roberta-base""" , from_pt=SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
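

# Hedged, standalone sketch of the ids_tensor / random_attention_mask helpers
# imported above (assumed behavior, for illustration only).
def _ids_tensor_sketch(shape, vocab_size, rng=np.random.default_rng(0)):
    return rng.integers(0, vocab_size, size=shape)


def _random_attention_mask_sketch(shape):
    mask = _ids_tensor_sketch(shape, vocab_size=2)
    mask[:, -1] = 1  # keep at least the final token visible in every row
    return mask


assert _random_attention_mask_sketch((2, 5)).shape == (2, 5)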
| 88 |
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """simple docstring"""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
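

# Hedged sanity check on a tiny instance: counting prime pairs p < q with
# p**q * q**p <= base**degree directly should agree with the two-pointer scan
# above (up to float rounding in the log2 comparison).
def _brute_force_count(base: int, degree: int) -> int:
    limit = base**degree
    primes = calculate_prime_numbers(int(degree * log2(base)))
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if p**q * q**p <= limit
    )


assert solution(16, 4) == _brute_force_count(16, 4) == 4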
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_2d import UNet2DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Optional[int]:
super().__init__()
lowercase__ : str = value_function
lowercase__ : List[str] = unet
lowercase__ : str = scheduler
lowercase__ : int = env
lowercase__ : Optional[Any] = env.get_dataset()
lowercase__ : int = {}
for key in self.data.keys():
try:
lowercase__ : Union[str, Any] = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ : Dict = {}
for key in self.data.keys():
try:
lowercase__ : List[Any] = self.data[key].std()
except: # noqa: E722
pass
lowercase__ : int = env.observation_space.shape[0]
lowercase__ : Dict = env.action_space.shape[0]
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
if type(lowerCamelCase__ ) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
for key, val in cond.items():
lowercase__ : Dict = val.clone()
return x_in
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : Optional[Any] = x.shape[0]
lowercase__ : int = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ : Union[str, Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ : str = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
lowercase__ : Union[str, Any] = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ : Dict = self.scheduler._get_variance(lowerCamelCase__ )
lowercase__ : Dict = torch.exp(0.5 * posterior_variance )
lowercase__ : Union[str, Any] = model_std * grad
lowercase__ : List[str] = 0
lowercase__ : List[str] = x.detach()
lowercase__ : Optional[int] = x + scale * grad
lowercase__ : Any = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
lowercase__ : List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ : Dict = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
lowercase__ : List[str] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
lowercase__ : Dict = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self , lowerCamelCase__ , lowerCamelCase__=64 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=0.1 ) -> Any:
# normalize the observations and create batch dimension
lowercase__ : Any = self.normalize(lowerCamelCase__ , """observations""" )
lowercase__ : List[Any] = obs[None].repeat(lowerCamelCase__ , axis=0 )
lowercase__ : List[Any] = {0: self.to_torch(lowerCamelCase__ )}
lowercase__ : Union[str, Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ : Tuple = randn_tensor(lowerCamelCase__ , device=self.unet.device )
lowercase__ : int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
lowercase__ : Optional[int] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
lowercase__ , lowercase__ : Optional[int] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
lowercase__ : Tuple = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
lowercase__ : Union[str, Any] = x[sorted_idx]
lowercase__ : Union[str, Any] = sorted_values[:, :, : self.action_dim]
lowercase__ : Tuple = actions.detach().cpu().numpy()
lowercase__ : Union[str, Any] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
lowercase__ : int = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ : List[Any] = np.random.randint(0 , lowerCamelCase__ )
lowercase__ : List[str] = denorm_actions[selected_index, 0]
        return denorm_actions
| 128 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
__snake_case = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
__snake_case = {F"funnel-transformer/{name}": 512 for name in _model_names}
__snake_case = {F"funnel-transformer/{name}": {'do_lower_case': True} for name in _model_names}
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : Union[str, Any] = VOCAB_FILES_NAMES
_a : Any = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_INIT_CONFIGURATION
_a : List[Any] = FunnelTokenizer
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = 2
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="<unk>" , lowerCamelCase__="<sep>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<cls>" , lowerCamelCase__="<mask>" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__="##" , **lowerCamelCase__ , ) -> Union[str, Any]:
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , clean_text=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , wordpieces_prefix=lowerCamelCase__ , **lowerCamelCase__ , )
lowercase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase__ ) != tokenize_chinese_chars
):
lowercase__ : List[str] = getattr(lowerCamelCase__ , normalizer_state.pop("""type""" ) )
lowercase__ : Optional[Any] = do_lower_case
lowercase__ : Union[str, Any] = strip_accents
lowercase__ : Optional[Any] = tokenize_chinese_chars
lowercase__ : Union[str, Any] = normalizer_class(**lowerCamelCase__ )
lowercase__ : Union[str, Any] = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 128 | 1 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    '''simple docstring'''
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    current_sum = max_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(current_sum, max_sum)
    return max_sum
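

# Hedged cross-check: the sliding window above should agree with a naive
# O(n * k) scan on a small example.
def _naive_max_sum(array: list[int], k: int) -> int:
    return max(sum(array[i : i + k]) for i in range(len(array) - k + 1))


assert (
    max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    == _naive_max_sum([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    == 24
)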
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 664 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Union[str, Any] ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str =[
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 1 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
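
# Hedged aside: quote() percent-encodes reserved characters so a typed query
# survives inside the URL ('%20'.join below covers the multi-argument case).
assert quote('hello world?') == 'hello%20world%3F'
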
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
| 708 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=1_8 , lowerCamelCase__=3_0 , lowerCamelCase__=4_0_0 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=[0.5, 0.5, 0.5] , lowerCamelCase__=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCamelCase = size if size is not None else {'''shortest_edge''': 1_8}
UpperCamelCase = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def UpperCAmelCase ( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase__ ( snake_case_, unittest.TestCase ):
'''simple docstring'''
_snake_case = LevitImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = LevitImageProcessingTester(self )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 350 | 0 |
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
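

# Hedged cross-check against Python's own base conversion. Note the manual
# grouping keeps a leading zero for inputs like "0110", so compare on an
# input that does not produce one.
def _oct_via_int(bin_string: str) -> str:
    return format(int(bin_string, 2), "o")


assert bin_to_octal("1111") == _oct_via_int("1111") == "17"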
if __name__ == "__main__":
from doctest import testmod
testmod()
| 273 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 62 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    # load the weights of the Pix2Struct model
    converted_torch_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(converted_torch_dict)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        image_processor.max_patches = 4096

    image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Whether the model is a VQA model.')
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 647 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
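# Typical usage of this module (a sketch, assuming the public names above):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("INFO")
#     logger.warning_advice("advisory warning; silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS=1")
#
# Progress bars can be toggled globally with enable_progress_bar() /
# disable_progress_bar(), which also forwards the setting to huggingface_hub.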
| 647 | 1 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
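# Note (added for clarity): unlike A*, this greedy best-first search orders the
# open list by the heuristic alone (f_cost = Manhattan distance to the goal)
# and never adds g_cost into f_cost, so the returned path is not guaranteed to
# be the shortest. For the 7x7 grid above, the start node's heuristic is
# abs(0 - 6) + abs(0 - 6) == 12.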
| 435 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 695 | 0 |
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index]
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
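    # Sanity check (added): for the 4-vertex sample graph above, the only
    # augmenting path from vertex 0 to vertex 3 is 0 -> 1 -> 2 -> 3 with
    # capacities (7, 6, 8), so a correct push-relabel run should print
    # "maximum flow is 6".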
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
return output
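# Example usage (a sketch added for illustration, not from the original file):
#
#     from transformers import AutoConfig
#
#     encoder = AutoConfig.from_pretrained("bert-base-uncased")
#     decoder = AutoConfig.from_pretrained("gpt2")
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention
#     round_trip = config.to_dict()  # nested encoder/decoder dicts plus model_type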
| 694 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
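# (Added note) Because the YAML is flattened with "." separators and attached
# via setattr, keys like "model.classification.name" become single attribute
# names containing dots, which is why they are read back with getattr(...)
# rather than normal dotted attribute access.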
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
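# Worked example (added): for the non-base checkpoint the rules above turn an
# original key such as
#     "layer_3.1.global_rep.0.pre_norm_attn.1.qkv_proj.weight"
# into
#     "mobilevitv2.encoder.layer.2.transformer.layer.0.attention.qkv_proj.weight"
# i.e. layer indices shift down by one and the attention/ffn submodules are
# renamed to the HF module layout.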
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
__a = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 30 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
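# How the lazy structure above behaves (added sketch): replacing the module in
# sys.modules with a _LazyModule means that e.g.
#
#     from transformers.models.whisper import WhisperForConditionalGeneration
#
# only triggers the actual `modeling_whisper` import (and therefore the torch
# availability check) at attribute-access time, keeping `import transformers`
# cheap when optional backends are missing.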
| 315 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
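if __name__ == "__main__":
    # Small self-test (added as an illustration; not in the original file).
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)  # heaviest edge; Kruskal should drop it
    mst = g.kruskal()
    assert 3 not in mst.connections[1]  # (1, 3) is not part of the MST
    print(mst.connections)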
| 315 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
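# Shape walk-through (added): with a batch of `batch_size` examples and 4
# endings per example, the collator flattens to `batch_size * 4` candidate
# sequences, pads them together, then views every padded tensor back to
# (batch_size, 4, seq_len) so the model scores all endings of one example
# jointly. Labels are re-attached as a (batch_size,) int64 tensor.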
def lowerCamelCase ( ) -> Any:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
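    # (Added note) The slicing above regroups the flat tokenizer output back
    # into per-example lists: entries [0:4] belong to example 0, [4:8] to
    # example 1, and so on, mirroring the 4-way flattening performed earlier.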
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def lowerCamelCase ( _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
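
# --- Illustrative sketch (added; not part of the original script) -------------
# A self-contained rendition of the multiple-choice accuracy metric defined in
# compute_metrics above, run on made-up logits/labels. It assumes the HF
# Trainer convention of passing (predictions, label_ids).
def _sketch_multiple_choice_accuracy():
    import numpy as np

    logits = np.array([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])  # one row per example
    label_ids = np.array([1, 2])
    preds = np.argmax(logits, axis=1)  # highest-scoring choice per example
    return (preds == label_ids).astype(np.float32).mean().item()  # -> 0.5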
| 139 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path)

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset: Dataset, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices)
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database, in batches."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format"):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)]), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format"):
                    written += num_rows

        return written
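
# --- Illustrative sketch (added; not from the original module) ----------------
# The writer above streams an Arrow table into SQL in fixed-size batches and
# forces `if_exists="append"` after the first chunk. The same idea with plain
# pandas + sqlite3; the table and database names here are made up.
def _sketch_batched_to_sql(df, name="my_table", db_path=":memory:", batch_size=1000):
    import sqlite3

    con = sqlite3.connect(db_path)
    written = 0
    for offset in range(0, len(df), batch_size):
        batch = df.iloc[offset : offset + batch_size]
        # the first chunk creates/replaces the table; later chunks must append
        if_exists = "append" if offset > 0 else "replace"
        batch.to_sql(name, con, index=False, if_exists=if_exists)
        written += len(batch)
    con.close()
    return written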
| 139 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
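
# --- Illustrative sketch (added; not part of the original file) ---------------
# `attention_window` may be a single int shared by all layers or one int per
# layer. A small normalization helper mirroring that convention; this helper
# is assumed for illustration, it is not taken from the modeling code.
def _sketch_expand_attention_window(attention_window, num_hidden_layers):
    if isinstance(attention_window, int):
        return [attention_window] * num_hidden_layers
    assert len(attention_window) == num_hidden_layers, "need one window size per layer"
    return list(attention_window)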
| 707 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : Tuple ) -> Tuple:
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE__ = 77
SCREAMING_SNAKE_CASE__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int]=0 ) -> Union[str, Any]:
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : List[Any] ) -> str:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowercase_ ( self : List[Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase_ ( self : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ = RobertaSeriesModelWithTransformation(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''A photo of an astronaut'''
SCREAMING_SNAKE_CASE__ = alt_pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ = RobertaSeriesModelWithTransformation(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : str ) -> Any:
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = alt_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = alt_pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''numpy''' )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
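
# --- Illustrative sketch (added; not part of the original tests) --------------
# The slow tests above repeatedly compare a 3x3 corner slice of the generated
# image against hard-coded reference values. The shared pattern, factored out:
def _sketch_image_slice_close(image, expected_slice, atol=1e-2):
    import numpy as np

    image_slice = image[0, -3:, -3:, -1]  # bottom-right corner, last channel
    return np.abs(image_slice.flatten() - np.asarray(expected_slice)).max() < atol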
| 472 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCamelCase_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
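
# --- Illustrative sketch (added): get_new_h_w rounds the requested size up so
# the latent grid divides evenly by scale_factor**2 before mapping back to the
# movq resolution. The same arithmetic written with ceil division:
def _sketch_new_size(h, w, scale_factor=8):
    new_h = -(-h // scale_factor**2)  # ceil(h / scale_factor**2)
    new_w = -(-w // scale_factor**2)
    return new_h * scale_factor, new_w * scale_factor
# e.g. _sketch_new_size(768, 770) -> (96, 104): 768/64 is exactly 12, while
# 770/64 rounds up to 13; both are then multiplied by scale_factor = 8.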
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any]=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Optional[int] = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ :List[str] = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = text_inputs.input_ids
SCREAMING_SNAKE_CASE__ :List[str] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ :Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE__ :Optional[Any] = text_input_ids.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Any = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Dict = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Dict = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE__ :Optional[int] = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE__ :Tuple = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ :List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !='''
f''' {type(_SCREAMING_SNAKE_CASE )}.''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ :Any = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
SCREAMING_SNAKE_CASE__ :List[Any] = negative_prompt
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ :Dict = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :List[Any] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ :List[Any] = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE__ :Optional[Any] = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Tuple = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :Optional[Any] = uncond_text_encoder_hidden_states.shape[1]
SCREAMING_SNAKE_CASE__ :int = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
SCREAMING_SNAKE_CASE__ :Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
SCREAMING_SNAKE_CASE__ :str = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ :List[str] = torch.cat([negative_prompt_embeds, prompt_embeds] )
SCREAMING_SNAKE_CASE__ :Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
SCREAMING_SNAKE_CASE__ :Dict = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : Any=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE__ :Dict = torch.device(f'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE__ :Dict = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Optional[int]=0 ) -> Optional[Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE__ :List[Any] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE__ :List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
SCREAMING_SNAKE_CASE__ :Dict = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE__ :List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self : str ) -> Any:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self : Dict , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_ : Optional[Union[str, List[str]]] = None , UpperCamelCase_ : int = 5_12 , UpperCamelCase_ : int = 5_12 , UpperCamelCase_ : int = 1_00 , UpperCamelCase_ : float = 4.0 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ) -> Any:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ :List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ :Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}''' )
SCREAMING_SNAKE_CASE__ :Optional[int] = self._execution_device
SCREAMING_SNAKE_CASE__ :str = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE__ :str = guidance_scale > 1.0
SCREAMING_SNAKE_CASE__ :Optional[int] = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ :str = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ :Tuple = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ :int = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE__ :str = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE__ :Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ :List[Any] = self.scheduler.timesteps
SCREAMING_SNAKE_CASE__ :str = self.unet.config.in_channels
SCREAMING_SNAKE_CASE__ :int = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE__ :List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ :str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ :str = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE__ :Optional[int] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ :Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE__ :int = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ :Optional[Any] = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE__ :List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE__ :Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ :int = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
SCREAMING_SNAKE_CASE__ :List[Any] = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE__ :List[str] = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ :List[str] = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ :Dict = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
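
# --- Illustrative sketch (added; not part of the pipeline) --------------------
# The denoising loop above applies classifier-free guidance by splitting the
# doubled batch into unconditional and text-conditioned halves. The core
# update, isolated (works on torch tensors or numpy arrays):
def _sketch_classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    # move the prediction away from the unconditional direction,
    # scaled by the guidance weight
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)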
| 209 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained('bert-base' )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , revision='aaaaaa' )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = True
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_SCREAMING_SNAKE_CASE , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
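
# --- Illustrative sketch (added; not part of the original tests) --------------
# The registration tests above always undo their changes in a `finally` block.
# A minimal standalone rendition of that pattern, using the module-level
# imports above; it relies on the private `_extra_content` dict exactly as the
# tests do, and the "custom" model type is a placeholder.
def _sketch_register_and_cleanup(config_cls, extractor_cls):
    try:
        AutoConfig.register("custom", config_cls)
        AutoFeatureExtractor.register(config_cls, extractor_cls)
        # ... exercise the auto classes here ...
    finally:
        CONFIG_MAPPING._extra_content.pop("custom", None)
        FEATURE_EXTRACTOR_MAPPING._extra_content.pop(config_cls, None)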
| 265 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f'can\'t find {path}')
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertLess(result['perplexity'] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertLess(result['perplexity'] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowerCAmelCase = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Dict ) -> Dict:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 2_8 )
self.assertGreaterEqual(result['eval_exact'] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Any ) -> List[str]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['eval_rouge1'] , 1_0 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['eval_bleu'] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'translation_no_trainer' ) ) )
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCAmelCase__ )
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
lowerCAmelCase = self.get_auto_remove_tmp_dir()
lowerCAmelCase = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
lowerCAmelCase = get_results(UpperCAmelCase__ )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'image_classification_no_trainer' ) ) )
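
# --- Illustrative sketch (added; not part of the original tests) --------------
# Counterpart to get_results() above: the example scripts being tested write
# their final metrics to `all_results.json` in the output dir, roughly like
# this minimal rendition.
def _sketch_save_results(output_dir, metrics):
    import json
    import os

    with open(os.path.join(output_dir, "all_results.json"), "w") as f:
        json.dump(metrics, f, indent=4)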
| 513 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = ['''pixel_values''']
def __init__( self : int , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Dict , ) -> None:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase = do_convert_rgb
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> np.ndarray:
lowerCAmelCase = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
lowerCAmelCase = (size['height'], size['width'])
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) -> str:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ) -> np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Dict[str, int]] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> PIL.Image.Image:
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowerCAmelCase = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase = [convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowerCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCAmelCase__ )
return encoded_outputs
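# A minimal NumPy-only sketch of the rescale -> normalize steps the preprocess method above
# applies, to make the arithmetic concrete. The 1/255 scale and the 0.5 mean/std are
# illustrative assumptions, not values tied to any particular checkpoint.
import numpy as np

def sketch_rescale_normalize(image: np.ndarray, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    rescaled = image.astype(np.float32) / 255.0  # rescale: uint8 [0, 255] -> float [0, 1]
    return (rescaled - mean) / std               # normalize: zero-center and scale

if __name__ == "__main__":
    demo = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
    out = sketch_rescale_normalize(demo)
    print(out.shape, float(out.min()), float(out.max()))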
| 513 | 1 |
'''simple docstring'''
def harmonic_series( n_term ):
    """Return the harmonic series 1 + 1/2 + ... + 1/n as a list of term strings."""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f"1/{temp + 1}" if series else "1" )
    return series


if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
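# A numeric companion sketch to the string-based series above: summing the first n terms
# as floats, e.g. 1 + 1/2 + 1/3 + 1/4 = 25/12 ≈ 2.0833 for n = 4.
def harmonic_partial_sum(n: int) -> float:
    return sum(1 / k for k in range(1, n + 1))

assert abs(harmonic_partial_sum(4) - 25 / 12) < 1e-12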
| 207 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__ : Any =None
A__ : Optional[int] =logging.get_logger(__name__)
A__ : Union[str, Any] ={'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : List[str] ={
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
A__ : List[str] ={
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
A__ : List[str] ='''▁'''
# Segments (not really needed)
A__ : str =0
A__ : str =1
A__ : List[Any] =2
A__ : str =3
A__ : Optional[Any] =4
class UpperCAmelCase ( PreTrainedTokenizerFast ):
_lowercase: Optional[int] = VOCAB_FILES_NAMES
_lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowercase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: Optional[Any] = '''left'''
_lowercase: Dict = XLNetTokenizer
def __init__( self : List[str] , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : Union[str, Any]=False , __snake_case : str=True , __snake_case : Union[str, Any]=False , __snake_case : List[Any]="<s>" , __snake_case : List[Any]="</s>" , __snake_case : str="<unk>" , __snake_case : int="<sep>" , __snake_case : int="<pad>" , __snake_case : Dict="<cls>" , __snake_case : int="<mask>" , __snake_case : Optional[int]=["<eop>", "<eod>"] , **__snake_case : List[str] , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
vocab_file=__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
_lowerCAmelCase = 3
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = remove_space
_lowerCAmelCase = keep_accents
_lowerCAmelCase = vocab_file
_lowerCAmelCase = False if not self.vocab_file else True
def lowercase__ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase__ ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase__ ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
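# A plain-list sketch of the sequence layout the two methods above produce for XLNet:
# single sequences become "A </s> <cls>" and pairs become "A </s> B </s> <cls>", with
# token type ids 0 for the first segment, 1 for the second and 2 for the final <cls>.
# The integer ids below are made up for illustration.
SEP_ID, CLS_ID = 4, 3  # hypothetical ids

def sketch_build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return ids_a + [SEP_ID, CLS_ID]
    return ids_a + [SEP_ID] + ids_b + [SEP_ID, CLS_ID]

def sketch_token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 1) + [2]
    return [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 1) + [2]

assert sketch_build_inputs([10, 11], [20]) == [10, 11, SEP_ID, 20, SEP_ID, CLS_ID]
assert sketch_token_type_ids([10, 11], [20]) == [0, 0, 0, 1, 1, 2]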
| 207 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
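# A numeric sanity-check sketch for the density above: integrating it over a wide interval
# with the midpoint rule should give roughly 1 for the default mu and sigma.
if __name__ == "__main__":
    n_steps, lo, hi = 100_000, -10.0, 10.0
    dx = (hi - lo) / n_steps
    area = sum(gaussian(lo + (k + 0.5) * dx) * dx for k in range(n_steps))
    assert abs(area - 1.0) < 1e-6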
| 700 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset( ):
    data = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 2_0, '''a ''' * 3_0, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset


class _UpperCAmelCase ( TestCase ):
    def test_make_duplicate_clusters( self : str ) -> None:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset( self : List[str] ) -> None:
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
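# The clustering exercised above ultimately rests on Jaccard similarity between token
# sets; this is a minimal sketch of that measure using the same 0.85 threshold intuition
# the test passes to make_duplicate_clusters.
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    if not sa and not sb:
        return 1.0
    return len(sa & sb) / len(sa | sb)

assert jaccard("a " * 20, "a " * 30) == 1.0  # identical token sets -> duplicates
assert jaccard("a " * 20, "b " * 7) == 0.0   # disjoint token sets -> distinct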
| 141 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout( tmpdir ):
    '''A second lock on the same file times out while the first is held.'''
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
            assert time.time() - _start > timeout
def test_long_lock_filename( tmpdir ):
    '''Overlong lock file names are shortened so the basename fits OS limits.'''
    filename = 'a' * 1_000 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
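# A usage sketch of the behaviour the tests above exercise: a second FileLock on the same
# path cannot be acquired while the first is held, and a short timeout raises Timeout
# instead of blocking forever. A sketch only; nothing here is datasets-specific.
def filelock_demo(tmpdir) -> None:
    outer = FileLock(str(tmpdir / 'demo.lock'))
    inner = FileLock(str(tmpdir / 'demo.lock'))
    with outer.acquire():
        try:
            inner.acquire(timeout=0.01)
        except Timeout:
            print('second acquire timed out while the lock was held')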
| 406 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = LxmertConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
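# A tiny stand-alone sketch of the save step above: any nn.Module state_dict round-trips
# through torch.save / load_state_dict. The Linear layer is a stand-in, not an LXMERT model.
import os
import tempfile

from torch import nn

def state_dict_roundtrip_demo() -> None:
    model = nn.Linear(4, 2)
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'model.bin')
        torch.save(model.state_dict(), path)
        restored = nn.Linear(4, 2)
        restored.load_state_dict(torch.load(path))
        assert torch.equal(model.weight, restored.weight)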
| 406 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def UpperCamelCase ( snake_case__ : Optional[int] ,snake_case__ : Optional[Any]=False ):
'''simple docstring'''
__snake_case :Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__snake_case :int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCamelCase ( snake_case__ : Tuple ,snake_case__ : Union[str, Any] ,snake_case__ : Optional[Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case :Tuple = """"""
else:
__snake_case :int = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case :Any = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
__snake_case :Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__snake_case :Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
__snake_case :str = in_proj_bias[: config.hidden_size]
__snake_case :str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case :str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case :str = in_proj_weight[
-config.hidden_size :, :
]
__snake_case :List[str] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( snake_case__ : List[str] ):
'''simple docstring'''
__snake_case :Dict = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(a__ ,a__ )
def UpperCamelCase ( snake_case__ : Optional[Any] ,snake_case__ : List[Any] ,snake_case__ : Union[str, Any] ):
'''simple docstring'''
__snake_case :Dict = dct.pop(a__ )
__snake_case :int = val
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case :Optional[Any] = Image.open(requests.get(a__ ,stream=a__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( snake_case__ : Tuple ,snake_case__ : Optional[Any] ,snake_case__ : int=True ):
'''simple docstring'''
__snake_case :int = ViTConfig()
# patch_size
if model_name[-1] == "8":
__snake_case :Dict = 8
# set labels if required
if not base_model:
__snake_case :Optional[Any] = 1000
__snake_case :List[str] = """huggingface/label-files"""
__snake_case :List[Any] = """imagenet-1k-id2label.json"""
__snake_case :int = json.load(open(hf_hub_download(a__ ,a__ ,repo_type="""dataset""" ) ,"""r""" ) )
__snake_case :Optional[int] = {int(a__ ): v for k, v in idalabel.items()}
__snake_case :List[Any] = idalabel
__snake_case :str = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__snake_case :Union[str, Any] = 384
__snake_case :str = 1536
__snake_case :str = 12
__snake_case :Optional[int] = 6
# load original model from torch hub
__snake_case :Optional[int] = torch.hub.load("""facebookresearch/dino:main""" ,a__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__snake_case :Optional[Any] = original_model.state_dict()
if base_model:
remove_classification_head_(a__ )
__snake_case :int = create_rename_keys(a__ ,base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ ,a__ ,a__ )
read_in_q_k_v(a__ ,a__ ,a__ )
# load HuggingFace model
if base_model:
__snake_case :Optional[Any] = ViTModel(a__ ,add_pooling_layer=a__ ).eval()
else:
__snake_case :Dict = ViTForImageClassification(a__ ).eval()
model.load_state_dict(a__ )
# Check outputs on an image, prepared by ViTImageProcessor
__snake_case :Dict = ViTImageProcessor()
__snake_case :Dict = image_processor(images=prepare_img() ,return_tensors="""pt""" )
__snake_case :int = encoding["""pixel_values"""]
__snake_case :List[str] = model(a__ )
if base_model:
__snake_case :Tuple = original_model(a__ )
assert torch.allclose(a__ ,outputs.last_hidden_state[:, 0, :] ,atol=1e-1 )
else:
__snake_case :List[str] = original_model(a__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(a__ ,outputs.logits ,atol=1e-3 )
Path(a__ ).mkdir(exist_ok=a__ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
lowerCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
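# The read_in_q_k_v step above slices a fused qkv projection into separate query/key/value
# tensors; this stand-alone sketch shows the same slicing on random data. hidden_size=8 is
# an arbitrary illustrative value.
def split_qkv_demo(hidden_size: int = 8) -> None:
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : 2 * hidden_size, :]
    value = in_proj_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)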
| 716 |
import os
def solution():
    '''Find the maximum top-to-bottom path sum in triangle.txt (Project Euler 18/67).'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
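# A worked example of the same top-down accumulation on a small literal triangle, so the
# recurrence is visible without the triangle.txt input file: the best path 3 -> 7 -> 4 -> 9
# sums to 23.
def max_path_sum(triangle):
    rows = [row[:] for row in triangle]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            up_left = rows[i - 1][j - 1] if j > 0 else 0
            up_right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
            rows[i][j] += max(up_left, up_right)
    return max(rows[-1])

assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23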
| 291 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase_ : Union[str, Any] = {"vocab_file": "spiece.model"}
UpperCamelCase_ : List[Any] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
UpperCamelCase_ : Any = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
class lowerCamelCase__ ( PreTrainedTokenizer ):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
UpperCamelCase__ = []
def __init__( self : List[str] ,a__ : Any ,a__ : List[Any]="<unk>" ,a__ : List[Any]="<s>" ,a__ : List[Any]="</s>" ,a__ : Union[str, Any]="<pad>" ,a__ : Any="[SEP]" ,a__ : Union[str, Any]="[MASK]" ,a__ : Dict="[CLS]" ,a__ : Optional[Any] = None ,**a__ : List[Any] ,):
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else bos_token
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else eos_token
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else unk_token
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else pad_token
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else cls_token
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a__ = AddedToken(a__ ,lstrip=a__ ,rstrip=a__ ) if isinstance(a__ ,a__ ) else mask_token
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ ,eos_token=a__ ,unk_token=a__ ,pad_token=a__ ,sep_token=a__ ,mask_token=a__ ,cls_token=a__ ,sp_model_kwargs=self.sp_model_kwargs ,**a__ ,)
a__ = vocab_file
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@property
def lowerCAmelCase_ ( self : str ):
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
a__ = self.__dict__.copy()
a__ = None
return state
def __setstate__( self : int ,a__ : int ):
a__ = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : str ,a__ : str ):
return self.sp_model.encode(a__ ,out_type=a__ )
def lowerCAmelCase_ ( self : str ,a__ : Tuple ):
return self.sp_model.piece_to_id(a__ )
def lowerCAmelCase_ ( self : List[str] ,a__ : int ):
a__ = self.sp_model.IdToPiece(a__ )
return token
def lowerCAmelCase_ ( self : str ,a__ : Dict ):
a__ = []
a__ = ""
a__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
a__ = True
a__ = []
else:
current_sub_tokens.append(a__ )
a__ = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def lowerCAmelCase_ ( self : Optional[int] ,a__ : Dict ,a__ : str = False ,a__ : Dict = None ,a__ : Tuple = True ,**a__ : Optional[Any] ,):
a__ = kwargs.pop("use_source_tokenizer" ,a__ )
a__ = self.convert_ids_to_tokens(a__ ,skip_special_tokens=a__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a__ = []
a__ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a__ ) )
a__ = []
sub_texts.append(a__ )
else:
current_sub_text.append(a__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
a__ = re.sub(r" (\[(MASK|SEP)\])" ,r"\1" ," ".join(a__ ) )
else:
a__ = "".join(a__ )
a__ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a__ = self.clean_up_tokenization(a__ )
return clean_text
else:
return text
def lowerCAmelCase_ ( self : List[str] ,a__ : Optional[int] ,a__ : List[str] = None ):
if not os.path.isdir(a__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
a__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ ,"wb" ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
def lowerCAmelCase_ ( self : str ,a__ : Dict ,a__ : Dict = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ = [self.cls_token_id]
a__ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : int ,a__ : int ,a__ : List[Any] = None ,a__ : str = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ ,token_ids_a=a__ ,already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def lowerCAmelCase_ ( self : Union[str, Any] ,a__ : List[str] ,a__ : Optional[int] = None ):
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
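# A list-level sketch of get_special_tokens_mask above: for "[CLS] A [SEP]" the mask is 1
# at the added special positions and 0 at sequence tokens; lengths below are illustrative.
def sketch_special_tokens_mask(len_a: int, len_b: int = 0) -> list:
    if len_b == 0:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]

assert sketch_special_tokens_mask(3) == [1, 0, 0, 0, 1]
assert sketch_special_tokens_mask(2, 2) == [1, 0, 0, 1, 0, 0, 1]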
| 331 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Any = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
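# A minimal sketch of the lazy-import idea used above: attribute access triggers the real
# import the first time it is needed, so merely importing the package stays cheap. This is
# a simplified stand-in for transformers' _LazyModule, not its actual implementation.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(attr)

# Usage sketch: TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1})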
| 457 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( ProcessorMixin ):
"""simple docstring"""
a_ :Dict =["""image_processor""", """feature_extractor"""]
a_ :str ="""TvltImageProcessor"""
a_ :str ="""TvltFeatureExtractor"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
__a = image_processor
__a = feature_extractor
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[str]=False , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
__a = None
if images is not None:
__a = self.image_processor(SCREAMING_SNAKE_CASE__ , mask_pixel=SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if images_mixed is not None:
__a = self.image_processor(SCREAMING_SNAKE_CASE__ , is_mixed=SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if audio is not None:
__a = self.feature_extractor(
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , mask_audio=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a = {}
if audio is not None:
output_dict.update(SCREAMING_SNAKE_CASE__ )
if images is not None:
output_dict.update(SCREAMING_SNAKE_CASE__ )
if images_mixed_dict is not None:
output_dict.update(SCREAMING_SNAKE_CASE__ )
return output_dict
@property
def __a ( self : List[str] ):
'''simple docstring'''
__a = self.image_processor.model_input_names
__a = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 201 | 0 |
def count_divisors( n ):
    '''Count divisors of n via prime factor multiplicities: d(n) = prod(e_i + 1).'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    '''Return the first triangular number with more than 500 divisors (Project Euler 12).'''
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
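# Worked check of the divisor-count formula above: 28 = 2**2 * 7, so
# d(28) = (2 + 1) * (1 + 1) = 6, matching the divisors 1, 2, 4, 7, 14, 28 -- and 28 is
# the first triangular number with more than five divisors.
assert count_divisors(28 ) == 6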
| 146 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase ( PretrainedConfig ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """perceiver"""
def __init__(self , __a=256 , __a=1280 , __a=768 , __a=1 , __a=26 , __a=8 , __a=8 , __a=None , __a=None , __a="kv" , __a=1 , __a=1 , __a="gelu" , __a=0.1 , __a=0.02 , __a=1E-1_2 , __a=True , __a=262 , __a=2048 , __a=56 , __a=[368, 496] , __a=16 , __a=1920 , __a=16 , __a=[1, 16, 224, 224] , **__a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase__ = num_latents
UpperCAmelCase__ = d_latents
UpperCAmelCase__ = d_model
UpperCAmelCase__ = num_blocks
UpperCAmelCase__ = num_self_attends_per_block
UpperCAmelCase__ = num_self_attention_heads
UpperCAmelCase__ = num_cross_attention_heads
UpperCAmelCase__ = qk_channels
UpperCAmelCase__ = v_channels
UpperCAmelCase__ = cross_attention_shape_for_attention
UpperCAmelCase__ = self_attention_widening_factor
UpperCAmelCase__ = cross_attention_widening_factor
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = use_query_residual
# masked language modeling attributes
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
# image classification attributes
UpperCAmelCase__ = image_size
# flow attributes
UpperCAmelCase__ = train_size
# multimodal autoencoding attributes
UpperCAmelCase__ = num_frames
UpperCAmelCase__ = audio_samples_per_frame
UpperCAmelCase__ = samples_per_patch
UpperCAmelCase__ = output_shape
class lowercase ( OnnxConfig ):
'''simple docstring'''
@property
def UpperCamelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCamelCase__ (self ) -> float:
"""simple docstring"""
return 1E-4
def UpperCamelCase__ (self , __a , __a = -1 , __a = -1 , __a = -1 , __a = False , __a = None , __a = 3 , __a = 40 , __a = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase__ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ = [' '.join(['a'] ) * seq_length] * batch_size
UpperCAmelCase__ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase__ = inputs.pop('input_ids' )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase__ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase__ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase__ = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
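# A sketch of the "effective axis" idea in generate_dummy_inputs above: when an axis is
# dynamic (-1), a small fixed size is substituted so ONNX export does not constant-fold
# that dimension. This simplifies the real compute_effective_axis_dimension helper; the
# fallback sizes here are illustrative.
def sketch_effective_axis(requested: int, fixed_default: int) -> int:
    return fixed_default if requested == -1 else requested

assert sketch_effective_axis(-1, 2) == 2  # dynamic batch -> fixed dummy batch of 2
assert sketch_effective_axis(4, 2) == 4   # explicit size wins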
| 146 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {"""vocab_file""": """spm_char.model"""}
_lowerCamelCase : List[Any] = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
_lowerCamelCase : str = {
"""microsoft/speecht5_asr""": 1_024,
"""microsoft/speecht5_tts""": 1_024,
"""microsoft/speecht5_vc""": 1_024,
}
class lowercase ( PreTrainedTokenizer ):
'''simple docstring'''
UpperCAmelCase : Any = VOCAB_FILES_NAMES
UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , snake_case : int , snake_case : Dict="<s>" , snake_case : List[str]="</s>" , snake_case : List[str]="<unk>" , snake_case : Optional[Any]="<pad>" , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
SCREAMING_SNAKE_CASE : Any = vocab_file
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Union[str, Any] = None
return state
def __setstate__( self : Optional[int] , snake_case : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : Optional[int] , snake_case : str ):
'''simple docstring'''
return self.sp_model.encode(snake_case , out_type=snake_case )
def lowerCamelCase_ ( self : str , snake_case : Dict ):
'''simple docstring'''
return self.sp_model.piece_to_id(snake_case )
def lowerCamelCase_ ( self : int , snake_case : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.sp_model.IdToPiece(snake_case )
return token
def lowerCamelCase_ ( self : List[str] , snake_case : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case ) + token
SCREAMING_SNAKE_CASE : List[Any] = []
else:
current_sub_tokens.append(snake_case )
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def lowerCamelCase_ ( self : Optional[Any] , snake_case : Optional[Any] , snake_case : str=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Any , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
SCREAMING_SNAKE_CASE : List[Any] = [1]
if token_ids_a is None:
return ([0] * len(snake_case )) + suffix_ones
return ([0] * len(snake_case )) + ([0] * len(snake_case )) + suffix_ones
def lowerCamelCase_ ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Tuple = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case )
        return (out_vocab_file,)
| 308 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowerCamelCase : Any = ["""text""", """image""", """audio"""]
def __a ( __lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE : List[str] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
inputs.append(create_inputs(__lowerCAmelCase ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def __a ( __lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Any = []
for output in outputs:
if isinstance(__lowerCAmelCase , (str, AgentText) ):
output_types.append('text' )
elif isinstance(__lowerCAmelCase , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(__lowerCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowercase :
'''simple docstring'''
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
SCREAMING_SNAKE_CASE : Any = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE : Tuple = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tool(*snake_case )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case ) , self.tool.outputs )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE : Any = self.tool(*snake_case )
if not isinstance(snake_case , snake_case ):
SCREAMING_SNAKE_CASE : int = [outputs]
self.assertEqual(len(snake_case ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case , self.tool.outputs ):
SCREAMING_SNAKE_CASE : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case , snake_case ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE : Tuple = []
for _input, input_type in zip(snake_case , self.tool.inputs ):
if isinstance(snake_case , snake_case ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE : Optional[int] = self.tool(*snake_case )
if not isinstance(snake_case , snake_case ):
SCREAMING_SNAKE_CASE : List[Any] = [outputs]
        self.assertEqual(len(snake_case ) , len(self.tool.outputs ) )
| 308 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
__A : Any = True
from torch.cuda.amp import autocast
__A : Dict = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase__ = field(
default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase__ = field(
default=a_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowerCamelCase__ = field(
default=a_ , metadata={"help": "Whether to log verbose messages or not."} , )
lowerCamelCase__ = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
lowerCamelCase__ = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
lowerCamelCase__ = field(
default=0.999_995 , metadata={"help": "Decay of gumbel temperature during training."} )
def __a ( A__ : Optional[Any] , A__ : str ):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE = logging.WARNING
if model_args.verbose_logging:
SCREAMING_SNAKE_CASE = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
SCREAMING_SNAKE_CASE = logging.INFO
logger.setLevel(A__ )
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase__ = field(
default=a_ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCamelCase__ = field(
default=a_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCamelCase__ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'"
} , )
lowerCamelCase__ = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'"
)
} , )
lowerCamelCase__ = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to \'file\'"} , )
lowerCamelCase__ = field(
default=a_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCamelCase__ = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there\'s no validation split"
} , )
lowerCamelCase__ = field(
default=a_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase__ = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase__ = 4_2
lowerCamelCase__ = 4_2
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self : Tuple , __lowerCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ):
SCREAMING_SNAKE_CASE = self.feature_extractor.pad(
lowerCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
SCREAMING_SNAKE_CASE = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
SCREAMING_SNAKE_CASE = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
SCREAMING_SNAKE_CASE = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
SCREAMING_SNAKE_CASE = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
SCREAMING_SNAKE_CASE = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCAmelCase__ , min_masks=2 , )
return batch
class _SCREAMING_SNAKE_CASE ( Trainer ):
'''simple docstring'''
def __init__( self : str , *__lowerCamelCase : str , __lowerCamelCase : Tuple=1 , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=1.0 , **__lowerCamelCase : Union[str, Any] ):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = max_gumbel_temp
SCREAMING_SNAKE_CASE = min_gumbel_temp
SCREAMING_SNAKE_CASE = gumbel_temp_decay
def _snake_case ( self : Optional[int] , __lowerCamelCase : nn.Module , __lowerCamelCase : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
SCREAMING_SNAKE_CASE = self._prepare_inputs(lowerCAmelCase__ )
if self.use_amp:
with autocast():
SCREAMING_SNAKE_CASE = self.compute_loss(lowerCAmelCase__ , lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = self.compute_loss(lowerCAmelCase__ , lowerCAmelCase__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
SCREAMING_SNAKE_CASE = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
SCREAMING_SNAKE_CASE = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']" )
if self.args.gradient_accumulation_steps > 1:
SCREAMING_SNAKE_CASE = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def __a ( ):
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
configure_logger(A__ , A__ )
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
SCREAMING_SNAKE_CASE = DatasetDict()
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
SCREAMING_SNAKE_CASE = DatasetDict()
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=A__ )
def prepare_dataset(A__ : Optional[int] ):
# check that all files have the correct sampling rate
SCREAMING_SNAKE_CASE = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
SCREAMING_SNAKE_CASE = datasets.map(
A__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
SCREAMING_SNAKE_CASE = vectorized_datasets.filter(
lambda A__ : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(A__ : Optional[int] ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
SCREAMING_SNAKE_CASE = vectorized_datasets.map(
A__ , batched=A__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(A__ )
SCREAMING_SNAKE_CASE = DataCollatorForWavaVecaPretraining(model=A__ , feature_extractor=A__ )
SCREAMING_SNAKE_CASE = WavaVecaPreTrainer(
model=A__ , data_collator=A__ , args=A__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=A__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
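# The trainer above anneals the gumbel-softmax temperature geometrically per update and
# clamps it at a floor; this sketch traces that schedule using the defaults declared in
# the ModelArguments dataclass (max 2.0, min 0.5, decay 0.999995 per step).
def gumbel_temperature(step: int, max_t: float = 2.0, min_t: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_t * decay**step, min_t)

assert gumbel_temperature(0) == 2.0
assert gumbel_temperature(10_000_000) == 0.5  # floor reached after enough updates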
if __name__ == "__main__":
    main()
| 16 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (PretrainedConfig ):
"""simple docstring"""
a__ = '''roc_bert'''
def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]:
'''simple docstring'''
snake_case_ : int = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Optional[Any] = use_cache
snake_case_ : Optional[Any] = enable_pronunciation
snake_case_ : List[Any] = enable_shape
snake_case_ : Optional[int] = pronunciation_embed_dim
snake_case_ : Dict = pronunciation_vocab_size
snake_case_ : int = shape_embed_dim
snake_case_ : Any = shape_vocab_size
snake_case_ : Optional[int] = concat_input
snake_case_ : List[Any] = position_embedding_type
snake_case_ : Any = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
snake_case = "true"
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__=82 , lowerCAmelCase__=16 ):
"""simple docstring"""
set_seed(42 )
_lowerCAmelCase : Optional[Any] = RegressionModel()
_lowerCAmelCase : Union[str, Any] = deepcopy(lowerCAmelCase__ )
_lowerCAmelCase : Dict = RegressionDataset(length=lowerCAmelCase__ )
_lowerCAmelCase : Optional[int] = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
model.to(accelerator.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
return model, ddp_model, dataloader
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
"""simple docstring"""
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
_lowerCAmelCase : List[Any] = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(lowerCAmelCase__ ):
_lowerCAmelCase : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
with accelerator.main_process_first():
_lowerCAmelCase : Tuple = dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
_lowerCAmelCase : Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCAmelCase__ ):
if use_longest:
return tokenizer.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )
return tokenizer.pad(lowerCAmelCase__ , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return DataLoader(lowerCAmelCase__ , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=16 )
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = Accelerator(dispatch_batches=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
_lowerCAmelCase : Dict = get_dataloader(lowerCAmelCase__ , not dispatch_batches )
_lowerCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=lowerCAmelCase__ )
_lowerCAmelCase , _lowerCAmelCase : str = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = []
for batch in dataloader:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = batch.values()
with torch.no_grad():
_lowerCAmelCase : str = model(lowerCAmelCase__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_lowerCAmelCase , _lowerCAmelCase : Dict = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase__ )
targs.append(lowerCAmelCase__ )
_lowerCAmelCase , _lowerCAmelCase : int = torch.cat(lowerCAmelCase__ ), torch.cat(lowerCAmelCase__ )
return logits, targs
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__=82 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=16 ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = get_basic_setup(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_lowerCAmelCase , _lowerCAmelCase : str = generate_predictions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
assert (
len(lowerCAmelCase__ ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase__ )}"""
def UpperCamelCase_ ( lowerCAmelCase__ = False , lowerCAmelCase__ = False ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = evaluate.load("glue" , "mrpc" )
_lowerCAmelCase , _lowerCAmelCase : int = get_mrpc_setup(lowerCAmelCase__ , lowerCAmelCase__ )
# First do baseline
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = setup["no"]
model.to(lowerCAmelCase__ )
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase__ )
with torch.inference_mode():
_lowerCAmelCase : Optional[int] = model(**lowerCAmelCase__ )
_lowerCAmelCase : str = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCAmelCase__ , references=batch["labels"] )
_lowerCAmelCase : Optional[int] = metric.compute()
# Then do distributed
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
_lowerCAmelCase : Any = model(**lowerCAmelCase__ )
_lowerCAmelCase : int = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase : Union[str, Any] = batch["labels"]
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def UpperCamelCase_ ( ):
"""simple docstring"""
_lowerCAmelCase : int = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_lowerCAmelCase : Tuple = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(lowerCAmelCase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
_lowerCAmelCase : List[str] = Accelerator()
test_torch_metrics(lowerCAmelCase__ , 5_12 )
accelerator.state._reset_state()
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
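

# A minimal sketch of the guarantee the tests above check, reusing the
# fixtures already imported in this module: gather_for_metrics() drops the
# samples that were duplicated to pad the last batch across processes, so
# exactly `length` predictions come back regardless of the process count.
def _gather_for_metrics_sketch(length=82, batch_size=16):
    accelerator = Accelerator()
    model, dataloader = accelerator.prepare(
        RegressionModel(), DataLoader(RegressionDataset(length=length), batch_size=batch_size)
    )
    gathered = []
    for batch in dataloader:
        with torch.no_grad():
            logits = model(batch["x"])
        gathered.append(accelerator.gather_for_metrics(logits))
    assert sum(t.shape[0] for t in gathered) == length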
| 587 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __A :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
_lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case )
_lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(_snake_case )
_lowerCAmelCase : Any = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase : Any = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase : Union[str, Any] = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase : Any = {"vision_model": vision_model, "text_model": text_model}
_lowerCAmelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
_lowerCAmelCase : Dict = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase : Tuple = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase : List[str] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
_lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase : Dict = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase : Optional[int] = after_output[0].numpy()
_lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1E-5 )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase : List[str] = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = to_atuple(vision_model.config.image_size )
_lowerCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase : Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase : int = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case ):
_lowerCAmelCase : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(_snake_case , _snake_case , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : int = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : int = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowerCAmelCase : List[str] = model_a(**_snake_case )
_lowerCAmelCase : List[Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_snake_case )
_lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase : List[str] = model_a(**_snake_case )
_lowerCAmelCase : Any = after_outputs[0].numpy()
_lowerCAmelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1E-5 )
@require_tf
class __A ( snake_case__ ,unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
_lowerCAmelCase : Optional[int] = 13
_lowerCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase : Optional[int] = random_attention_mask([batch_size, 4] )
_lowerCAmelCase : Tuple = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
_lowerCAmelCase : Optional[Any] = TFViTModel(_snake_case , name="vision_model" )
_lowerCAmelCase : Union[str, Any] = TFBertModel(_snake_case , name="text_model" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = TFViTModelTester(self )
_lowerCAmelCase : List[str] = TFBertModelTester(self )
_lowerCAmelCase : str = vit_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : int = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = vision_config_and_inputs
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __A ( snake_case__ ,unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
_lowerCAmelCase : List[Any] = 13
_lowerCAmelCase : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4] )
_lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase : Tuple = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase : Tuple = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase : Any = to_atuple(vision_model.config.image_size )
_lowerCAmelCase : List[str] = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase : Dict = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase : str = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
_lowerCAmelCase : Any = TFDeiTModel(_snake_case , name="vision_model" )
_lowerCAmelCase : int = TFRobertaModel(_snake_case , name="text_model" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = TFDeiTModelTester(self )
_lowerCAmelCase : Union[str, Any] = TFRobertaModelTester(self )
_lowerCAmelCase : Any = vit_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = vision_config_and_inputs
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Optional[int] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __A ( snake_case__ ,unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
_lowerCAmelCase : List[str] = 13
_lowerCAmelCase : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase : Tuple = random_attention_mask([batch_size, 4] )
_lowerCAmelCase : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
_lowerCAmelCase : Any = TFCLIPVisionModel(_snake_case , name="vision_model" )
_lowerCAmelCase : Any = TFBertModel(_snake_case , name="text_model" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = TFCLIPVisionModelTester(self )
_lowerCAmelCase : Union[str, Any] = TFBertModelTester(self )
_lowerCAmelCase : str = clip_model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase : Tuple = vision_config_and_inputs
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_snake_case )
_lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
_lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCAmelCase : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_snake_case , padding=_snake_case , return_tensors="np" )
_lowerCAmelCase : List[Any] = model(**_snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCAmelCase : Any = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1E-3 ) )
| 587 | 1 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
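

# Usage sketch: a strictly diagonally dominant 3x3 system; a few Jacobi
# sweeps already move the initial guess toward the solution of A x = b.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))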
| 576 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=10 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , snake_case=None , ) -> Optional[Any]:
_UpperCAmelCase = size if size is not None else {'shortest_edge': 18}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_frames
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = crop_size
def lowerCamelCase_ ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = VivitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = VivitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCamelCase_ ( self ) -> List[str]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=snake_case )
for video in video_inputs:
self.assertIsInstance(snake_case , snake_case )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> Optional[Any]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for video in video_inputs:
self.assertIsInstance(snake_case , snake_case )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> str:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for video in video_inputs:
self.assertIsInstance(snake_case , snake_case )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
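

# Usage sketch (hypothetical sizes): preprocess a single 10-frame clip with
# the processor configuration exercised above; frames are (H, W, C) uint8
# arrays and the output follows the (batch, frames, channels, H, W) layout.
def _vivit_preprocess_sketch():
    processor = VivitImageProcessor(size={'shortest_edge': 18}, crop_size={'height': 18, 'width': 18})
    video = [np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8) for _ in range(10)]
    return processor(video, return_tensors='pt').pixel_values  # torch.Size([1, 10, 3, 18, 18])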
| 573 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=1_9_2)

    if "base" in model_name:
        window_size = 6
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    elif "large" in model_name:
        window_size = 1_2
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''', '''embeddings.mask_token''')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''', '''embeddings.norm''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')

    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''

    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # split the fused qkv projection into separate query/key/value weights
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[
                    :dim
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    image_processor = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    with torch.no_grad():
        # keep the whole ModelOutput so .keys() below is valid
        outputs = model(**inputs)

    print(outputs.keys())
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)

        print(F'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(F'Pushing model and image processor for {model_name} to hub')
        model.push_to_hub(F'microsoft/{model_name}')
        image_processor.push_to_hub(F'microsoft/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
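
# Example invocation, assuming this script is saved as
# convert_swin_simmim_to_pytorch.py (paths below are placeholders):
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim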
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
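

# A simplified plain-NumPy sketch of the rotation the last test checks,
# assuming `sin` and `cos` are already repeated once per (even, odd) feature
# pair: RoFormer rotates each pair by a position-dependent angle, i.e.
# x' = x * cos + rotate_half(x) * sin.
def _rotary_rotation_sketch(x, sin, cos):
    import numpy as np

    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotate_half = np.stack([-x2, x1], axis=-1).reshape(x.shape)
    return x * cos + rotate_half * sin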
| 8 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
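

# Quick sanity check of the mapping exercised above: each supported string
# resolves to the corresponding torch activation module.
if __name__ == "__main__":
    for name in ("swish", "silu", "mish", "gelu"):
        print(name, "->", type(get_activation(name)).__name__)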
| 17 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''focalnet'''

    def __init__(
        self,
        image_size=2_2_4,
        patch_size=4,
        num_channels=3,
        embed_dim=9_6,
        use_conv_embed=False,
        hidden_sizes=[1_9_2, 3_8_4, 7_6_8, 7_6_8],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1E-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=3_2,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
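

# A minimal usage sketch (with the installed library you would
# `from transformers import FocalNetConfig`): the config doubles as a
# backbone config, so the stages derived above can be selected via
# out_features.
def _focalnet_backbone_config_sketch():
    config = FocalNetConfig(out_features=["stage1", "stage2"])
    # config.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    return config.stage_names, config.out_features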
| 130 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
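

# Illustrative usage sketch (downloads the vocab on first use): the fast
# tokenizer emits [CLS] A [SEP] for single sequences and [CLS] A [SEP] B [SEP]
# for pairs, with token_type_ids of 0 for segment A and 1 for segment B.
if __name__ == "__main__":
    tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    encoding = tokenizer("sequence builders", "multi-sequence build")
    print(encoding["input_ids"])
    print(encoding["token_type_ids"])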
| 1 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)  # round to nearest level
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
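

# The transform above is histogram equalization: with histogram h over N pixels,
# probabilities p_k = h_k / N and CDF s_k = p_0 + ... + p_k, each grey level k is
# mapped to round((L - 1) * s_k). A numpy-only sketch of the same lookup table,
# useful for cross-checking the class without any OpenCV I/O:
def equalization_lut(image: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(image.ravel(), bins=levels, range=(0, levels))
    cdf = np.cumsum(hist) / image.size
    return np.rint((levels - 1) * cdf).astype(np.uint8)  # old level -> stretched level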
| 1 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
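

# Illustrative usage sketch: these filesystems are what let fsspec URLs such as
# "gzip://file.txt::https://host/file.txt.gz" expose the single decompressed
# inner file. A local round-trip using only the standard library plus fsspec:
if __name__ == "__main__":
    import gzip
    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".txt.gz", delete=False) as tmp:
        tmp.write(gzip.compress(b"hello compressed world"))
    fs = GzipFileSystem(fo=tmp.name)
    print(fs.cat("file.txt"))  # the path is cosmetic: the archive holds exactly one file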
| 651 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
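

# Illustrative usage sketch: one reverse-diffusion update with a zero "score" as
# a stand-in for a trained score network. The VP SDE uses drift -0.5*beta(t)*x
# and diffusion sqrt(beta(t)); `step_pred` rescales the model output by the
# marginal standard deviation before taking the Euler-Maruyama step.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    score = torch.zeros_like(sample)
    sample, sample_mean = scheduler.step_pred(score, sample, scheduler.timesteps[0])
    print(sample.shape, sample_mean.shape)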
| 21 | 0 |
"""Project Euler 99: https://projecteuler.net/problem=99"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the greatest base**exponent pair."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
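

# Why logarithms: log10 is monotonic, so a**x > c**d exactly when
# x * log10(a) > d * log10(c). This sidesteps materializing integers with
# hundreds of thousands of digits. Worked check with the pair quoted in the
# Project Euler 99 statement (632382**518061 exceeds 519432**525806):
assert 518061 * log10(632382) > 525806 * log10(519432)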
| 717 |
def apply_table(inp, table):
    """Apply a 1-indexed permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
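

# The building block of the whole cipher is `apply_table`: a permutation table
# lists, in 1-based positions, which input bit fills each output slot. For
# example the initial permutation IP = [2, 6, 3, 1, 4, 8, 5, 7] maps "abcdefgh"
# as checked below:
assert apply_table("abcdefgh", [2, 6, 3, 1, 4, 8, 5, 7]) == "bfcadheg"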
| 110 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 104 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char string to little-endian chunk order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 hex chars in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message bit string to a multiple of 512 bits and append its length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the preprocessed bit string into 512-bit blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Circular left rotation on 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of `message`."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
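

def check_against_hashlib(message: bytes = b"The quick brown fox jumps over the lazy dog") -> bool:
    """Illustrative cross-check: the pure-Python digest above should agree with
    the C implementation in the standard library for any input."""
    import hashlib

    return md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")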
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 241 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 600 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 17 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 706 |
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A palindrome rearrangement exists iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
snake_case : Dict = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
snake_case : Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""") | 182 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 628 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal play value for the player moving at `depth` (max moves first)."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
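

# Worked example: with 8 leaves the tree height is log2(8) = 3. The maximizer
# picks within leaf pairs -> [90, 33, 65, 34423]; the minimizer then gives
# [33, 65]; the root maximizer therefore reaches 65, matching main() above.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65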
| 628 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 388 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 388 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Union[str, int] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit `n` can be placed at grid[row][column]."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the next empty cell in row-major order; ``None`` if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 1 |
"""Project Euler 114: https://projecteuler.net/problem=114"""


def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` units with red blocks of length
    at least three, any two blocks separated by at least one black square."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
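

# Worked example from the problem statement: a row of length 7 admits exactly
# 17 valid arrangements (including the empty one), which pins down the
# recurrence above.
assert solution(7) == 17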
| 703 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = '''owlvit_text_model'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=49408 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2048 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : List[str]=8 , UpperCamelCase_ : List[str]=16 , UpperCamelCase_ : List[str]="quick_gelu" , UpperCamelCase_ : Any=1E-5 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Optional[Any]=0.0_2 , UpperCamelCase_ : Tuple=1.0 , UpperCamelCase_ : int=0 , UpperCamelCase_ : Optional[int]=49406 , UpperCamelCase_ : str=49407 , **UpperCamelCase_ : Tuple , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase_ : Dict =vocab_size
lowerCAmelCase_ : Any =hidden_size
lowerCAmelCase_ : List[Any] =intermediate_size
lowerCAmelCase_ : Union[str, Any] =num_hidden_layers
lowerCAmelCase_ : List[str] =num_attention_heads
lowerCAmelCase_ : Optional[Any] =max_position_embeddings
lowerCAmelCase_ : str =hidden_act
lowerCAmelCase_ : Dict =layer_norm_eps
lowerCAmelCase_ : Dict =attention_dropout
lowerCAmelCase_ : Tuple =initializer_range
lowerCAmelCase_ : str =initializer_factor
@classmethod
def __A ( cls : str , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : Any ):
cls._set_token_in_kwargs(UpperCamelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] =cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
lowerCAmelCase_ : Optional[Any] =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
_UpperCamelCase : Optional[int] = '''owlvit_vision_model'''
def __init__( self : int , UpperCamelCase_ : Tuple=768 , UpperCamelCase_ : Union[str, Any]=3072 , UpperCamelCase_ : Any=12 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Any=3 , UpperCamelCase_ : str=768 , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : str="quick_gelu" , UpperCamelCase_ : int=1E-5 , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : str=0.0_2 , UpperCamelCase_ : Optional[Any]=1.0 , **UpperCamelCase_ : Dict , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase_ : Dict =hidden_size
lowerCAmelCase_ : List[str] =intermediate_size
lowerCAmelCase_ : Union[str, Any] =num_hidden_layers
lowerCAmelCase_ : str =num_attention_heads
lowerCAmelCase_ : Any =num_channels
lowerCAmelCase_ : Optional[Any] =image_size
lowerCAmelCase_ : Union[str, Any] =patch_size
lowerCAmelCase_ : int =hidden_act
lowerCAmelCase_ : Optional[int] =layer_norm_eps
lowerCAmelCase_ : Dict =attention_dropout
lowerCAmelCase_ : Tuple =initializer_range
lowerCAmelCase_ : Tuple =initializer_factor
@classmethod
def __A ( cls : Any , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[Any] ):
cls._set_token_in_kwargs(UpperCamelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] =cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
lowerCAmelCase_ : Tuple =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
_UpperCamelCase : Dict = '''owlvit'''
_UpperCamelCase : int = True
def __init__( self : List[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : List[str]=512 , UpperCamelCase_ : Union[str, Any]=2.6_5_9_2 , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : int , ):
super().__init__(**UpperCamelCase_ )
if text_config is None:
lowerCAmelCase_ : Any ={}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
lowerCAmelCase_ : int ={}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
lowerCAmelCase_ : List[str] =OwlViTTextConfig(**UpperCamelCase_ )
lowerCAmelCase_ : Optional[int] =OwlViTVisionConfig(**UpperCamelCase_ )
lowerCAmelCase_ : List[str] =projection_dim
lowerCAmelCase_ : Optional[Any] =logit_scale_init_value
lowerCAmelCase_ : str =return_dict
lowerCAmelCase_ : Union[str, Any] =1.0
@classmethod
def __A ( cls : str , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : Optional[Any] ):
cls._set_token_in_kwargs(UpperCamelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] =cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def __A ( cls : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase_ : List[str] ={}
lowerCAmelCase_ : Optional[int] =text_config
lowerCAmelCase_ : Optional[int] =vision_config
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
def __A ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] =copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : str =self.text_config.to_dict()
lowerCAmelCase_ : Any =self.vision_config.to_dict()
lowerCAmelCase_ : str =self.__class__.model_type
return output
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def __A ( self : int ):
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def __A ( self : int ):
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def __A ( self : Any ):
return 1E-4
def __A ( self : Tuple , UpperCamelCase_ : "ProcessorMixin" , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : Optional["TensorType"] = None , ):
lowerCAmelCase_ : Optional[int] =super().generate_dummy_inputs(
processor.tokenizer , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , framework=UpperCamelCase_ )
lowerCAmelCase_ : Union[str, Any] =super().generate_dummy_inputs(
processor.image_processor , batch_size=UpperCamelCase_ , framework=UpperCamelCase_ )
return {**text_input_dict, **image_input_dict}
@property
def __A ( self : List[Any] ):
return 14
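
# Note (explanatory comment, added): the ONNX config above declares the named
# input/output tensors with their dynamic axes, an absolute tolerance of 1e-4
# for validating exported outputs, and a default ONNX opset of 14.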
| 305 | 0 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight tour problem.
    """
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find the solution for the knight tour problem for a board of size n.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
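

# Minimal usage sketch (added for illustration; the helper name below is not
# part of the original module). An open knight's tour exists on a 5x5 board,
# so each printed row contains move numbers drawn from 1..25.
def demo_open_knight_tour(n: int = 5) -> None:
    for row in open_knight_tour(n):
        print(" ".join(f"{cell:2d}" for cell in row))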
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 |
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
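

# Illustrative examples (added; not part of the original file):
#   power(2, 10) -> 1024
#   power(5, 3)  -> 125
#   power(3, 0)  -> 1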
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 195 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
A_ : int = logging.get_logger(__name__)
class lowerCamelCase (A__ ):
lowerCamelCase__ : Any = 'vision-encoder-decoder'
lowerCamelCase__ : Any = True
def __init__( self : int , **__UpperCAmelCase : Tuple ) -> Optional[int]:
super().__init__(**__UpperCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" )
SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" )
SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , __UpperCAmelCase : PretrainedConfig , __UpperCAmelCase : PretrainedConfig , **__UpperCAmelCase : Optional[int] ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.encoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.decoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output
class lowerCamelCase (A__ ):
lowerCamelCase__ : List[str] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-4
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class lowerCamelCase (A__ ):
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ = OrderedDict()
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : "PreTrainedTokenizerBase" , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
SCREAMING_SNAKE_CASE__ = OrderedDict()
SCREAMING_SNAKE_CASE__ = super().generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dummy_input["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ = (batch, encoder_sequence, self._config.encoder_hidden_size)
SCREAMING_SNAKE_CASE__ = dummy_input.pop("""input_ids""" )
SCREAMING_SNAKE_CASE__ = dummy_input.pop("""attention_mask""" )
SCREAMING_SNAKE_CASE__ = torch.zeros(__UpperCAmelCase )
return common_inputs
class lowerCamelCase (A__ ):
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> None:
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : PretrainedConfig , __UpperCAmelCase : PretrainedConfig , __UpperCAmelCase : str = "default" ) -> OnnxConfig:
SCREAMING_SNAKE_CASE__ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__UpperCAmelCase , __UpperCAmelCase )
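

# Note (explanatory comment, added): the last helper wires the encoder's
# hidden size into the decoder ONNX config so the exported decoder graph
# knows the width of the `encoder_hidden_states` tensor it will receive.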
| 616 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"  # elided in the source; kept as a placeholder
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"  # elided in the source; kept as a placeholder
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
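

# How the pieces above fit together (explanatory comment, added):
# LocalMetricTest runs every local metric's doctests; for metrics whose real
# forward pass is expensive (bleurt, bertscore, comet), the patchers
# registered with register_intensive_calls_patcher replace the heavy model
# call with a cheap mock before the doctest executes, keeping the suite fast
# and offline.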
| 616 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
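

# Note (explanatory comment, added): enable_sequential_cpu_offload() keeps at
# most one sub-module on the GPU at a time, which is what allows the last test
# to assert a peak CUDA allocation below 7 GB.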
| 33 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
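

# Note (explanatory comment, added): the expected sums and means are asserted
# per backend because floating-point rounding accumulates differently on CPU,
# MPS and CUDA across the scheduler's denoising loop.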
| 688 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
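

# Usage sketch (illustrative comment, added): LukeConfig() yields the base
# "luke" configuration; fields can be overridden by keyword, e.g.
# LukeConfig(entity_emb_size=512, use_entity_aware_attention=False).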
| 714 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
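

# Worked context (explanatory comment, added): with the defaults this computes
# the last 8 digits of the hyperexponentiation 1777^^1855 (tetration), i.e.
# 1777 raised to itself 1855 times, reduced mod 10**8 at every step, the task
# posed by Project Euler problem 188.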
if __name__ == "__main__":
print(F"""{solution() = }""")
| 164 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
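
# Example invocation (illustrative comment, added; flag names match the
# argparse definitions above):
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base --push_to_hub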
| 513 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
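

# Equivalent vectorized form (illustrative comment, added): OpenCV images are
# numpy arrays, so the per-pixel loop above can be replaced with a single
# broadcasted subtraction such as `img = 255 - img`.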
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 513 | 1 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for the given activation-function name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
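

# Usage sketch (added for illustration; guarded so nothing runs on import):
if __name__ == "__main__":
    import torch

    act = get_activation("gelu")
    print(act(torch.linspace(-2.0, 2.0, steps=5)))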
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    """Map a TF Pegasus state-dict key to its Hugging Face equivalent."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
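

# Example of the renaming above (illustrative comment, added): running the
# PATTERNS table over a TF key such as "model/decoder/LayerNorm/gamma"
# produces "model.decoder_layer_norm.weight".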
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
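

# Example invocation (illustrative comment, added; the checkpoint path shows
# the default layout assumed by get_tf_weights_as_numpy, not a bundled file):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc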
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running Markov Chain Algorithm
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]

            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
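

# Small usage sketch (added for illustration; the transition table below is
# made up). Starting at "a" and walking 5000 steps, the returned Counter
# records how often the chain visits each node.
def _demo_markov_chain() -> None:
    transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.075),
        ("a", "c", 0.025),
        ("b", "a", 0.15),
        ("b", "b", 0.8),
        ("b", "c", 0.05),
        ("c", "a", 0.25),
        ("c", "b", 0.25),
        ("c", "c", 0.5),
    ]
    print(get_transitions("a", transitions, 5000))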
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
def solution(n: int = 100) -> int:
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
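

# Worked example (explanatory comment, added): for n = 5 the distinct values
# of a**b with 2 <= a, b <= 5 are
#   {4, 8, 16, 32, 9, 27, 81, 243, 64, 256, 1024, 25, 125, 625, 3125}
# (4**2 = 16 duplicates 2**4), so solution(5) == 15.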
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_UpperCAmelCase : Union[str, Any] =sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
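

# Why list_field exists (explanatory comment, added): dataclasses forbid
# mutable defaults, so a default_factory is used instead; e.g.
#   foo_int: List[int] = list_field(default=[1, 2, 3])
# gives every instance its own fresh list.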
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = 42
SCREAMING_SNAKE_CASE__ : Dict = 42
SCREAMING_SNAKE_CASE__ : str = 42
SCREAMING_SNAKE_CASE__ : int = 42
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 42
SCREAMING_SNAKE_CASE__ : Optional[int] = field(default="""toto""", metadata={"""help""": """help message"""} )
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : int = None
class snake_case__( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = """titi"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """toto"""
class snake_case__( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = """titi"""
SCREAMING_SNAKE_CASE__ : Dict = """toto"""
SCREAMING_SNAKE_CASE__ : List[Any] = 42
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = """toto"""
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Optional[Any] = BasicEnum(self.foo )
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """toto"""
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = MixedTypeEnum(self.foo )
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : int = field(default=_UpperCamelCase, metadata={"""help""": """help message"""} )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Dict = list_field(default=[] )
SCREAMING_SNAKE_CASE__ : List[Any] = list_field(default=[] )
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = list_field(default=[] )
SCREAMING_SNAKE_CASE__ : List[str] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = field()
SCREAMING_SNAKE_CASE__ : List[Any] = field()
SCREAMING_SNAKE_CASE__ : Any = field()
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : List[Any] = BasicEnum(self.required_enum )
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 42
SCREAMING_SNAKE_CASE__ : Union[str, Any] = field()
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = field(default="""toto""", metadata={"""help""": """help message"""} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : Any = None
@dataclass
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = field(default=_UpperCamelCase, metadata={"""help""": """help message"""} )
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Any = list_field(default=[] )
SCREAMING_SNAKE_CASE__ : int = list_field(default=[] )
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on ArgumentParser instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = HfArgumentParser(__lowercase )
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__lowercase , required=__lowercase )
expected.add_argument('''--bar''' , type=__lowercase , required=__lowercase )
expected.add_argument('''--baz''' , type=__lowercase , required=__lowercase )
expected.add_argument('''--flag''' , type=__lowercase , default=__lowercase , const=__lowercase , nargs='''?''' )
self.argparsersEqual(__lowercase , __lowercase )
lowerCAmelCase_ : Tuple = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
(lowerCAmelCase_ ) : Optional[Any] = parser.parse_args_into_dataclasses(__lowercase , look_for_args_file=__lowercase )
self.assertFalse(example.flag )
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : List[Any] = HfArgumentParser(__lowercase )
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=__lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=__lowercase , help='''help message''' )
self.argparsersEqual(__lowercase , __lowercase )
    def test_with_default_bool(self):
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__lowercase , default=__lowercase , const=__lowercase , nargs='''?''' )
expected.add_argument('''--baz''' , type=__lowercase , default=__lowercase , const=__lowercase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=__lowercase , dest='''baz''' )
expected.add_argument('''--opt''' , type=__lowercase , default=__lowercase )
lowerCAmelCase_ : Optional[int] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__lowercase )
for dataclass_type in dataclass_types:
lowerCAmelCase_ : int = HfArgumentParser(__lowercase )
self.argparsersEqual(__lowercase , __lowercase )
lowerCAmelCase_ : str = parser.parse_args([] )
self.assertEqual(__lowercase , Namespace(foo=__lowercase , baz=__lowercase , opt=__lowercase ) )
lowerCAmelCase_ : Dict = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(__lowercase , Namespace(foo=__lowercase , baz=__lowercase , opt=__lowercase ) )
lowerCAmelCase_ : Any = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(__lowercase , Namespace(foo=__lowercase , baz=__lowercase , opt=__lowercase ) )
lowerCAmelCase_ : int = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(__lowercase , Namespace(foo=__lowercase , baz=__lowercase , opt=__lowercase ) )
lowerCAmelCase_ : Optional[int] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(__lowercase , Namespace(foo=__lowercase , baz=__lowercase , opt=__lowercase ) )
    def test_with_enum(self):
lowerCAmelCase_ : Any = HfArgumentParser(__lowercase )
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(__lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
lowerCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase_ : List[Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
lowerCAmelCase_ : Optional[int] = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase_ : Optional[Any] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo : Literal["titi", "toto", 42] = "toto"
lowerCAmelCase_ : Dict = HfArgumentParser(__lowercase )
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(__lowercase , __lowercase )
lowerCAmelCase_ : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
lowerCAmelCase_ : Optional[int] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
lowerCAmelCase_ : Union[str, Any] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
    def test_with_list(self):
lowerCAmelCase_ : Union[str, Any] = HfArgumentParser(__lowercase )
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__lowercase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__lowercase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__lowercase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__lowercase )
self.argparsersEqual(__lowercase , __lowercase )
lowerCAmelCase_ : Any = parser.parse_args([] )
self.assertEqual(
__lowercase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase_ : Any = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(__lowercase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def test_with_optional(self):
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=__lowercase , type=__lowercase )
expected.add_argument('''--bar''' , default=__lowercase , type=__lowercase , help='''help message''' )
expected.add_argument('''--baz''' , default=__lowercase , type=__lowercase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__lowercase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__lowercase )
lowerCAmelCase_ : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__lowercase )
for dataclass_type in dataclass_types:
lowerCAmelCase_ : Dict = HfArgumentParser(__lowercase )
self.argparsersEqual(__lowercase , __lowercase )
lowerCAmelCase_ : Any = parser.parse_args([] )
self.assertEqual(__lowercase , Namespace(foo=__lowercase , bar=__lowercase , baz=__lowercase , ces=[] , des=[] ) )
lowerCAmelCase_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(__lowercase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def test_with_required(self):
lowerCAmelCase_ : str = HfArgumentParser(__lowercase )
lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=__lowercase , required=__lowercase )
expected.add_argument('''--required_str''' , type=__lowercase , required=__lowercase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__lowercase , )
self.argparsersEqual(__lowercase , __lowercase )
    def test_with_string_literal_annotation(self):
lowerCAmelCase_ : str = HfArgumentParser(__lowercase )
lowerCAmelCase_ : int = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__lowercase , required=__lowercase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__lowercase , )
expected.add_argument('''--opt''' , type=__lowercase , default=__lowercase )
expected.add_argument('''--baz''' , default='''toto''' , type=__lowercase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__lowercase )
self.argparsersEqual(__lowercase , __lowercase )
    def test_parse_dict(self):
lowerCAmelCase_ : Optional[Any] = HfArgumentParser(__lowercase )
lowerCAmelCase_ : Dict = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase_ : Optional[int] = parser.parse_dict(__lowercase )[0]
lowerCAmelCase_ : Optional[int] = BasicExample(**__lowercase )
self.assertEqual(__lowercase , __lowercase )
    def test_parse_dict_extra_key(self):
lowerCAmelCase_ : str = HfArgumentParser(__lowercase )
lowerCAmelCase_ : List[Any] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 4_2,
}
self.assertRaises(__lowercase , parser.parse_dict , __lowercase , allow_extra_keys=__lowercase )
    def test_parse_json(self):
lowerCAmelCase_ : Union[str, Any] = HfArgumentParser(__lowercase )
lowerCAmelCase_ : Optional[Any] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Dict = os.path.join(__lowercase , '''temp_json''' )
os.mkdir(__lowercase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(__lowercase , __lowercase )
lowerCAmelCase_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
lowerCAmelCase_ : str = BasicExample(**__lowercase )
self.assertEqual(__lowercase , __lowercase )
    def test_parse_yaml(self):
lowerCAmelCase_ : str = HfArgumentParser(__lowercase )
lowerCAmelCase_ : str = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : str = os.path.join(__lowercase , '''temp_yaml''' )
os.mkdir(__lowercase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(__lowercase , __lowercase )
lowerCAmelCase_ : int = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
lowerCAmelCase_ : Union[str, Any] = BasicExample(**__lowercase )
self.assertEqual(__lowercase , __lowercase )
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
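# A minimal end-to-end sketch of the API exercised above (illustrative only;
# `SimpleArgs` is a hypothetical dataclass, not one defined in this file):
#
#     @dataclass
#     class SimpleArgs:
#         foo: int = 42
#         baz: str = "toto"
#
#     parser = HfArgumentParser(SimpleArgs)
#     (parsed,) = parser.parse_args_into_dataclasses(["--foo", "7"], look_for_args_file=False)
#     assert parsed.foo == 7 and parsed.baz == "toto"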
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
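# Illustration (with a hypothetical input string) of what `_re_checkpoint` extracts
# from a config class docstring:
#
#     _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]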
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
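    # Illustrative sketch of the contract enforced above (an added example, not part
    # of the upstream file): `rope_scaling` must carry exactly a `type` in
    # {"linear", "dynamic"} and a float `factor` greater than 1.0.
    #
    #     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    #     GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})   # raises ValueError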
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list, return indices i < j with nums[i] + nums[j] == target, or []."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the definite integral of fnc on [x_start, x_end] with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        """f(x) = x^3 + x^2"""
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 100_000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
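    # Cross-check (added): x^3 is odd, so its integral over [-5, 5] cancels; the exact
    # value is the integral of x^2 alone, 2 * 5**3 / 3 = 250 / 3 ≈ 83.333, which the
    # approximations above should approach as the step count grows.
    print(f"exact value: {250 / 3}")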
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
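    # Worked example (added): 5 = 0b101 and 3 = 0b011 differ in their two high bits.
    print(binary_xor(5, 3))  # 0b110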
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel.encode, carrying the encoded latents."""

    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE model (encoder + vector quantizer + decoder) for image latents."""
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
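# A minimal usage sketch (added for illustration; the shapes follow the defaults
# above but are otherwise an assumption):
#
#     model = VQModel()
#     sample = torch.randn(1, 3, 32, 32)
#     latents = model.encode(sample).latents
#     reconstruction = model.decode(latents).sample  # back in image space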
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
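# A minimal concrete subcommand sketch (hypothetical example; `parser` here would be
# the sub-parsers action returned by `ArgumentParser.add_subparsers()`):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello from the CLI")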
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
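    # Usage note (added): once the lazy module is installed in `sys.modules`, an
    # import such as `from transformers import GPTNeoXConfig` resolves the attribute
    # on first access, so torch-backed classes are only imported when requested.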
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
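    # Worked example (added): the proper divisors of 28 are 1, 2, 4, 7 and 14, which
    # sum to 28, so 28 is a perfect number.
    print(sum_of_divisors(28))  # 28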
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
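    # Example invocation (added; assumes this script is saved as
    # convert_pegasus_tf_to_pytorch.py and that the checkpoint path exists):
    #
    #     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc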
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
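# A short usage sketch (added; fetching the pretrained vocabulary is an assumption):
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer("Hello world").input_ids   # [CLS] ... [SEP] added automatically
#     text = tokenizer.decode(ids, skip_special_tokens=True)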
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial():
    plt.scatter(X, y, color="""red""" )
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Linear Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
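    # Illustrative follow-up (added): `predict` returns a one-element array, so the
    # value can be printed as, e.g.:
    #     predicted = pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    #     print(f"Predicted salary for level 5.5: {predicted[0]:.2f}")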
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePt10:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePt10:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a, b):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePt10)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
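    # A minimal usage sketch outside the test suite (the config path here is
    # hypothetical; keys must match the dataclass fields):
    #
    #     parser = HfArgumentParser(BasicExample)
    #     (cfg,) = parser.parse_json_file("run_config.json")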
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser) | 128 | 1 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` by rotating the head element."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using in-place swaps and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
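# A quick doctest-style check: both strategies should agree on the set of
# permutations they produce.
#
#     >>> sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
#     True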
| 383 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
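# With the lazy module installed in sys.modules, an import such as
# `from transformers.models.encoder_decoder import EncoderDecoderModel`
# only pulls in the heavy torch-dependent code on first attribute access.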
| 170 | 0 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the front of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data payloads of the nodes holding node_data_1 and node_data_2
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
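# swap_nodes above exchanges the *data* payloads. A sketch of the pointer-based
# alternative (hypothetical helper, assuming the classes above): rewire each
# node's predecessor instead of touching .data.
def swap_nodes_by_relinking(ll: LinkedList, data_1: Any, data_2: Any) -> None:
    if data_1 == data_2:
        return
    prev_1, node_1 = None, ll.head
    while node_1 is not None and node_1.data != data_1:
        prev_1, node_1 = node_1, node_1.next
    prev_2, node_2 = None, ll.head
    while node_2 is not None and node_2.data != data_2:
        prev_2, node_2 = node_2, node_2.next
    if node_1 is None or node_2 is None:
        return
    # repoint the predecessors (or the head) at the opposite node
    if prev_1 is not None:
        prev_1.next = node_2
    else:
        ll.head = node_2
    if prev_2 is not None:
        prev_2.next = node_1
    else:
        ll.head = node_1
    # finally swap the outgoing links
    node_1.next, node_2.next = node_2.next, node_1.next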
| 247 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
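# Without `accelerator.accumulate`, the same effect is usually written by hand
# (a minimal sketch, assuming the variables defined in training_function above):
#
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / gradient_accumulation_steps
#         accelerator.backward(loss)
#         if (step + 1) % gradient_accumulation_steps == 0:
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()
#
# The context manager additionally skips gradient synchronization on the
# non-stepping iterations, which the manual version above does not.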
| 247 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
__magic_name__ = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...')
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
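# Typical invocation (assuming this script is saved as
# convert_efficientnet_to_pytorch.py; the original filename is not shown here):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model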
| 664 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
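# In generation code the constraint is passed via the `constraints` argument,
# e.g. `model.generate(..., constraints=[DisjunctiveConstraint(word_ids)])`,
# where `word_ids` lists the tokenizations of the acceptable surface forms and
# exactly one of them must appear in the output.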
| 664 | 1 |
"""simple docstring"""
class EditDistance:
    """
    Computes the Levenshtein distance between two words, both top-down
    (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()

    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
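# A quick sanity check: both solvers should return 3 for ("kitten", "sitting")
# (substitute k->s, substitute e->i, insert g).
#
#     >>> EditDistance().min_dist_bottom_up("kitten", "sitting")
#     3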
| 713 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the last mismatching character when the
        pattern is aligned at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print('No match found')
else:
    print('Pattern found in following positions: ')
    print(positions)
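# For text = "ABAABA" and pattern = "AB" the expected output is:
# Pattern found in following positions:
# [0, 3]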
| 500 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 68 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """Returns 1 if both inputs are equal (logical XNOR), otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """
    Tests the xnor_gate function
    """
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
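# XNOR is the complement of XOR; for 0/1 inputs an equivalent one-liner is
# `1 - (input_1 ^ input_2)`.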
| 68 | 1 |
from ...processing_utils import ProcessorMixin
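# A hedged usage sketch for the processor class below (checkpoint name assumed):
# typical TTS fine-tuning pairs text inputs with audio targets, e.g.
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     batch = processor(text="Hello", audio_target=waveform, sampling_rate=16000)
#
# which yields "input_ids" plus "labels" holding the target mel features.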
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # pad spectrogram targets: temporarily pretend the feature
                # extractor produces num_mel_bins-sized features
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs) | 484 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model) | 484 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
@property
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : List[Any] = ort.SessionOptions()
snake_case__ : Any = False
return options
    def test_inference_default_pndm(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 347 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__(
        self ,
        input_dims : int = 128 ,
        targets_length : int = 256 ,
        max_decoder_noise_time : float = 2000.0 ,
        d_model : int = 768 ,
        num_layers : int = 12 ,
        num_heads : int = 12 ,
        d_kv : int = 64 ,
        d_ff : int = 2048 ,
        dropout_rate : float = 0.1 ,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask(self , query_input , key_input ):
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward(self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer(nn.Module ):
    def __init__(self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward(self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module ):
    def __init__(self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward(self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention(nn.Module ):
    def __init__(self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward(self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond(nn.Module ):
    def __init__(self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward(self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense(nn.Module ):
    def __init__(self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_a = nn.Linear(d_model , d_ff , bias=False )
        self.wi_b = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward(self , hidden_states ):
        # gated activation: one projection is passed through GELU, the other stays linear
        hidden_gelu = self.act(self.wi_a(hidden_states ) )
        hidden_linear = self.wi_b(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm(nn.Module ):
    def __init__(self , hidden_size , eps=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward(self , hidden_states ):
        # T5-style layer norm: scale only, no mean subtraction; computed in float32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation(nn.Module ):
    def forward(self , input: torch.Tensor ):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer(nn.Module ):
    def __init__(self , in_features , out_features ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )

    def forward(self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
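# Quick shape sanity-check for the FiLM layer above (sizes are illustrative,
# not taken from the original module):
#   film = TaFiLMLayer(in_features=3072 , out_features=768 )
#   x , cond = torch.randn(2 , 16 , 768 ) , torch.randn(2 , 1 , 3072 )
#   film(x , cond ).shape  # -> torch.Size([2, 16, 768])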
| 475 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id)
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight'''))
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias'''))
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight'''))
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight'''))
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias'''))
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight'''))
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias'''))
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
])
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''')
            in_proj_bias = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
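# The four helpers above swap Swin's patch-merging weight layout between the
# original checkpoints and the HF ordering; each `reverse_*` undoes its
# counterpart, e.g. (illustrative):
#   w = torch.arange(8.0).reshape(2, 4)
#   torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(w)), w)  # True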
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(F'''openmmlab/{model_name}''')
        processor.push_to_hub(F'''openmmlab/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
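# Example invocation (the script filename and output path are illustrative):
#   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny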
| 187 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/vit-base-patch16-224""": """https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json""",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "vit"
    def __init__(
        self ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        image_size=224 ,
        patch_size=16 ,
        num_channels=3 ,
        qkv_bias=True ,
        encoder_stride=16 ,
        **kwargs ,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation(self ) -> float:
'''simple docstring'''
return 1E-4
| 187 | 1 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
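# Illustrative usage (env var names are made up):
#   os.environ["NPROC"] = "8"
#   get_int_from_env(["WORLD_SIZE", "NPROC"], default=1)  # -> 8
#   parse_flag_from_env("DEBUG", default=False)  # -> False unless DEBUG is set truthy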
| 144 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_unet(self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return model
@property
    def dummy_unet_condition(self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
        return model
@property
    def dummy_vqvae_and_unet(self ):
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        unet = UNet2DModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return vqvae, unet
@slow
    def test_audio_diffusion(self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self ):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 144 | 1 |
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float] , left: int , right: int ) -> int | float:
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 244 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
A__ : int = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
A__ : Dict = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
A__ : Optional[int] = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="macro" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric ):
    def _info(self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg="macro" )
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 244 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int] ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=True , )
        assert hasattr(self , 'env' )
    def create_estimator(self , instance_count ):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv(self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def test_script(self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile ) | 67 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline(Pipeline ):
"""simple docstring"""
    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def preprocess(self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _forward(self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.9 ):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation
    def _get_bounding_box(self , box ):
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
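# Minimal usage sketch (the checkpoint name is illustrative; any object-detection
# model supported by the pipeline works):
#   from transformers import pipeline
#   detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
#   detector("cats.png" , threshold=0.9 )  # -> [{"score": ..., "label": ..., "box": {...}}, ...]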
| 702 | import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''' )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text )
        print(tokens )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens ) | 576 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__(
        self ,
        vocab_file ,
        unk_token="<unk>" ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        pad_token="<pad>" ,
        sep_token="[SEP]" ,
        mask_token="[MASK]" ,
        cls_token="[CLS]" ,
        sp_model_kwargs: Optional[Dict[str, Any]] = None ,
        **kwargs ,
    ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size(self ):
        return self.sp_model.get_piece_size()
    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token(self , index ):
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string(self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False,
                clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True,
                **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            return self.clean_up_tokenization(text)
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Fall back to serializing the in-memory SentencePiece model.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        # Pair of sequences: [CLS] A [SEP] B [SEP]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
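
# --- Illustrative usage sketch (added; not part of the original class) ---
# The class name `SentencePieceTokenizer` and the model file `spiece.model` are
# assumptions for demonstration only; any class exposing the methods above over a
# trained SentencePiece model would behave the same way.
#
#   tokenizer = SentencePieceTokenizer("spiece.model")
#   enc = tokenizer("Hello world")   # build_inputs_with_special_tokens wraps the
#                                    # ids as [CLS] ... [SEP]
#   tokenizer.decode(enc["input_ids"], skip_special_tokens=True)  # -> "Hello world"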
| 41 |
"""simple docstring"""
from copy import deepcopy
class UpperCAmelCase :
def __init__( self : Optional[Any] , __lowerCamelCase : list[int] | None = None , __lowerCamelCase : int | None = None ):
"""simple docstring"""
if arr is None and size is not None:
_snake_case = size
_snake_case = [0] * size
elif arr is not None:
self.init(__lowerCamelCase )
else:
raise ValueError('''Either arr or size must be specified''' )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : list[int] ):
"""simple docstring"""
_snake_case = len(__lowerCamelCase )
_snake_case = deepcopy(__lowerCamelCase )
for i in range(1 , self.size ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index - (index & (-index))
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
_snake_case = self.next_(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
self.add(__lowerCamelCase , value - self.get(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
if right == 0:
return 0
_snake_case = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
_snake_case = self.prev(__lowerCamelCase )
return result
def __UpperCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
return self.prefix(__lowerCamelCase ) - self.prefix(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
return self.query(__lowerCamelCase , index + 1 )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
_snake_case = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
_snake_case = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
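
    # Illustrative usage sketch (added; not in the original file): a point update
    # followed by range queries, verified against the recovered array.
    ft = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert ft.prefix(3) == 1 + 2 + 3           # sum of elements [0, 3)
    ft.add(1, 10)                              # array becomes [1, 12, 3, 4, 5]
    assert ft.query(1, 4) == 12 + 3 + 4        # sum of elements [1, 4)
    assert ft.get_array() == [1, 12, 3, 4, 5]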
| 103 | 0 |
"""Processor class for LayoutLMv3, combining an image processor and a tokenizer."""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True,
                 padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None,
                 return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False,
                 return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False,
                 verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input: OCR-produced words/boxes clash with user-provided ones
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor (optionally running OCR)
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # add pixel values (re-aligned to overflowing samples if necessary)
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
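
# --- Illustrative usage sketch (added; not part of the original file) ---
# The checkpoint name and image path below are assumptions for demonstration only.
#
#   from transformers import LayoutLMv3Processor
#   from PIL import Image
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # runs OCR when apply_ocr=True
#   list(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values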
| 537 |
"""Compute the length of a circular arc from its central angle (in degrees) and radius."""
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Return the arc length: the fraction angle/360 of the full circumference 2*pi*r."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
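    # Quick check (added for illustration): a 90-degree arc of radius 10 is a
    # quarter of the circumference, i.e. 2 * pi * 10 / 4 = 5 * pi ~= 15.70796.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9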
| 537 | 1 |