| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
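For context, a sketch of how these fixtures might be consumed in a test. The test function and the `load_dataset_builder` call are illustrative assumptions, not part of the original file; recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets:

```python
import datasets


def test_dummy_dataset_script(dataset_loading_script_dir):
    # pytest injects the fixture above: the path of the generated __dummy_dataset1__.py
    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    # _info() runs during builder construction, so the features are already known
    assert "tokens" in builder.info.features
```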
| 683 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
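Why a set of products counts partitions: prime factorizations are unique, so every multiset of primes summing to `n` maps to a distinct product. A quick illustrative check (not part of the original file):

```python
# partitions of 7 into primes: 7, 5+2, 3+2+2 -> products 7, 10, 12
assert partition(7) == {7, 10, 12}
assert len(partition(7)) == 3  # three prime partitions of 7
```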
| 683 | 1 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    """simple docstring"""

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
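To see the animation, the scene can be rendered with a local manim Community Edition install; a minimal programmatic sketch (the file/module layout is an assumption):

```python
from manim import tempconfig

# low-quality programmatic render, roughly equivalent to `manim -ql stage_1.py Stage1`
with tempconfig({"quality": "low_quality"}):
    Stage1().render()
```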
| 683 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options

The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None

generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options

The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
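The dense retrieval path above is exact max-inner-product search over passage embeddings. A self-contained miniature of the same faiss pattern, using synthetic data rather than the app's real index:

```python
import faiss
import numpy as np

d = 128                                    # embedding width used by the app
index = faiss.IndexFlatIP(d)               # exact inner-product index
passage_reps = np.random.rand(1000, d).astype("float32")
index.add(passage_reps)

query = np.random.rand(1, d).astype("float32")
D, I = index.search(query, 10)             # scores and row ids of the top-10 passages
print(I[0])
```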
| 683 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
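Outside the test suite, the scheduler would typically be swapped into a diffusers pipeline. A hedged usage sketch (the checkpoint id and prompt are illustrative):

```python
from diffusers import DPMSolverSDEScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# reuse the pipeline's scheduler config so betas/timesteps stay consistent
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
image = pipe("a photo of an astronaut", num_inference_steps=25).images[0]
```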
| 683 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    """simple docstring"""

    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = tmp_node.value  # type: ignore
                # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
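A short illustrative use of the k-th smallest lookup, reusing the demo values above (not part of the original file):

```python
t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
# inorder traversal yields [1, 3, 4, 6, 7, 8, 10, 13, 14]
print(t.find_kth_smallest(3, t.root))  # 4
```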
| 683 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    """simple docstring"""

    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
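The tests above work by patching `sys.argv` so that a script's argument parser sees injected CLI flags. The pattern in isolation (a generic sketch, unrelated to these specific scripts):

```python
import sys
from unittest.mock import patch


def main() -> None:
    # a stand-in for a script entry point that reads sys.argv
    print(sys.argv[1:])


with patch.object(sys, "argv", ["prog", "--num_beams", "2"]):
    main()  # prints ['--num_beams', '2']
```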
| 683 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["FeatureExtractionMixin", "PreTrainedTokenizerBase"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
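A minimal configuration example (assuming the public transformers API; the sizes are illustrative, not Whisper defaults):

```python
from transformers import WhisperConfig, WhisperForConditionalGeneration

# build a small randomly initialized model from a custom config
config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
model = WhisperForConditionalGeneration(config)
print(f"{model.num_parameters():,} parameters")
```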
| 683 | 1 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """simple docstring"""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
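An illustrative usage of the class (not part of the original file); note that `add_edge` returns `self`, so calls chain:

```python
graph: GraphAdjacencyList[int] = GraphAdjacencyList(directed=False)
graph.add_edge(1, 2).add_edge(2, 3)
print(graph)  # {1: [2], 2: [1, 3], 3: [2]}
```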
| 683 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
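On the student side (a hypothetical follow-up, not part of this script), the extracted six-layer state dict can seed a smaller model whose key names match what was kept:

```python
# hypothetical: initialize a 6-layer student from the extracted checkpoint
import torch
from transformers import RobertaConfig, RobertaForMaskedLM

student_config = RobertaConfig.from_pretrained("roberta-large", num_hidden_layers=6)
student = RobertaForMaskedLM(student_config)
state_dict = torch.load("serialization_dir/tf_roberta_048131723.pth")
student.load_state_dict(state_dict, strict=False)  # strict=False tolerates any unmatched heads
```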
| 683 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    """simple docstring"""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def snake_case__ ( ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self ) -> Optional[int]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def _lowercase ( self ) -> Dict:
_UpperCamelCase : Dict = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_snake_case )
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : Optional[int] = prepare_img()
_UpperCamelCase : List[str] = image_processor(images=_snake_case , return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**_snake_case )
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _snake_case )
_UpperCamelCase : Union[str, Any] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
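# Note: the (1, 1001) logit shape checked above follows Google's MobileNet
# convention of ImageNet's 1000 classes plus an extra "background" class at index 0.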
| 683 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, second batch (index 1)
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, second batch (index 1)
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
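# A minimal sketch (illustrative, not the transformers implementation) of the
# behaviour the temperature test above checks: dividing logits by a temperature
# below 1 sharpens the softmax distribution, above 1 flattens it, and the argmax
# never changes.
import numpy as np

def temperature_warp(logits, temperature):
    # scale the logits; a later softmax turns the scaling into sharper/flatter probs
    return np.asarray(logits) / temperature

def softmax(x):
    e = np.exp(x - np.max(x))
    return e / e.sum()

base = np.log([0.1, 0.2, 0.7])
print(softmax(temperature_warp(base, 0.5)))  # peak grows, valleys shrink
print(softmax(temperature_warp(base, 1.3)))  # peak shrinks, valleys grow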
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
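# A hedged numpy sketch of the top-k semantics the test above verifies: all logits
# outside the k largest are replaced by a filter value, and min_tokens_to_keep acts
# as a floor on k (names and defaults here are illustrative).
import numpy as np

def top_k_filter(scores, top_k, min_tokens_to_keep=1, filter_value=-np.inf):
    k = max(top_k, min_tokens_to_keep)
    kth_largest = np.sort(scores, axis=-1)[:, -k][:, None]  # per-row threshold
    return np.where(scores < kth_largest, filter_value, scores)

print(top_k_filter(np.array([[0.0, 1.0, 2.0, 3.0, 4.0]]), top_k=3))
# -> [[-inf -inf  2.  3.  4.]]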
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
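# A rough nucleus (top-p) sketch matching the behaviour tested above: keep the
# smallest set of tokens whose cumulative probability reaches top_p, never fewer
# than min_tokens_to_keep. An illustration, not the library code.
import numpy as np

def top_p_filter(logits, top_p, min_tokens_to_keep=1, filter_value=-np.inf):
    order = np.argsort(logits)[::-1]       # token indices, most likely first
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    cum = np.cumsum(probs[order])
    keep = cum - probs[order] < top_p      # tokens needed to first reach top_p
    keep[:min_tokens_to_keep] = True       # enforce the lower bound
    out = np.full_like(logits, filter_value)
    out[order[keep]] = logits[order[keep]]
    return out

print(top_p_filter(np.log(np.array([0.3, 0.1, 0.1, 0.5])), top_p=0.8))
# keeps the 0.5 and 0.3 tokens, filters the rest, as in the test expectation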
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
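# Sketch of the rule exercised above: while cur_len is below min_length the EOS
# logit is pinned to -inf so decoding cannot stop early (illustrative stand-in).
import numpy as np

def min_length_process(scores, cur_len, min_length, eos_token_id):
    scores = np.array(scores, copy=True)
    if cur_len < min_length:
        scores[:, eos_token_id] = -np.inf
    return scores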
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
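# Both forced-token processors above reduce to the same move at their trigger step
# (cur_len == 1 for BOS, cur_len == max_length - 1 for EOS): zero out the forced
# token's logit and send every other logit to -inf. Illustrative helper:
import numpy as np

def force_token(scores, token_id):
    forced = np.full_like(scores, -np.inf)
    forced[:, token_id] = 0.0  # log-prob 0, i.e. probability 1 after softmax
    return forced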
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
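# FlaxLogitsProcessorList, as the equivalence checks above demonstrate, behaves as a
# left fold of the processors over the scores. A minimal stand-in of that idea:
def run_processors(processors, input_ids, scores, cur_len):
    for proc in processors:
        scores = proc(input_ids, scores, cur_len=cur_len)
    return scores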
| 683 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"""vocab_file""": """vocab.txt"""}
_UpperCAmelCase : List[Any] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
_UpperCAmelCase : Tuple = {
"""openbmb/cpm-ant-10b""": 1024,
}
def snake_case__ ( UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : str = collections.OrderedDict()
with open(UpperCamelCase ,'''r''' ,encoding='''utf-8''' ) as reader:
_UpperCamelCase : int = reader.readlines()
for index, token in enumerate(UpperCamelCase ):
_UpperCamelCase : Any = token.rstrip('''\n''' )
_UpperCamelCase : Optional[Any] = index
return vocab
class UpperCAmelCase ( a_ ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case="<unk>" , _snake_case=200 ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = vocab
_UpperCamelCase : List[Any] = unk_token
_UpperCamelCase : Dict = max_input_chars_per_word
def _lowercase ( self , _snake_case ) -> Any:
_UpperCamelCase : List[str] = list(_snake_case )
if len(_snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Optional[int] = []
while start < len(_snake_case ):
_UpperCamelCase : int = len(_snake_case )
_UpperCamelCase : Dict = None
while start < end:
_UpperCamelCase : int = ''''''.join(chars[start:end] )
if substr in self.vocab:
_UpperCamelCase : Dict = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_snake_case )
_UpperCamelCase : List[Any] = end
return sub_tokens
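# The method above is a greedy longest-match-first wordpiece loop: from `start` it
# tries the longest substring and shrinks `end` until a vocabulary hit, emitting the
# unk token and advancing one character when nothing matches. A compact standalone
# sketch (vocab and input are illustrative):
def greedy_wordpiece(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1          # shrink until the longest in-vocab prefix is found
        if end == start:      # nothing matched: emit unk, advance one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

print(greedy_wordpiece("unbelievable", {"un", "believ", "able"}))  # ['un', 'believ', 'able']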
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[str] = VOCAB_FILES_NAMES
A__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ['input_ids', 'attention_mask']
A__ : str = False
def __init__( self , _snake_case , _snake_case="<d>" , _snake_case="</d>" , _snake_case="<s>" , _snake_case="</s>" , _snake_case="<pad>" , _snake_case="<unk>" , _snake_case="</n>" , _snake_case="</_>" , _snake_case="left" , **_snake_case , ) -> str:
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_snake_case , eod_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , unk_token=_snake_case , line_token=_snake_case , space_token=_snake_case , padding_side=_snake_case , **_snake_case , )
_UpperCamelCase : Optional[int] = bod_token
_UpperCamelCase : Optional[int] = eod_token
_UpperCamelCase : Optional[int] = load_vocab(_snake_case )
_UpperCamelCase : int = self.encoder[space_token]
_UpperCamelCase : Dict = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_UpperCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _snake_case : _snake_case[1] ) )
_UpperCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_UpperCamelCase : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _lowercase ( self ) -> Dict:
return self.encoder[self.bod_token]
@property
def _lowercase ( self ) -> str:
return self.encoder[self.eod_token]
@property
def _lowercase ( self ) -> Optional[Any]:
return self.encoder["\n"]
@property
def _lowercase ( self ) -> int:
return len(self.encoder )
def _lowercase ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Dict = []
for x in jieba.cut(_snake_case , cut_all=_snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_snake_case ) )
return output_tokens
def _lowercase ( self , _snake_case , **_snake_case ) -> Optional[Any]:
_UpperCamelCase : Any = [i for i in token_ids if i >= 0]
_UpperCamelCase : int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_snake_case , **_snake_case )
def _lowercase ( self , _snake_case ) -> Dict:
return token in self.encoder
def _lowercase ( self , _snake_case ) -> str:
return "".join(_snake_case )
def _lowercase ( self , _snake_case ) -> List[str]:
return self.encoder.get(_snake_case , self.encoder.get(self.unk_token ) )
def _lowercase ( self , _snake_case ) -> Optional[int]:
return self.decoder.get(_snake_case , self.unk_token )
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
if os.path.isdir(_snake_case ):
_UpperCamelCase : Dict = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_UpperCamelCase : Any = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
_UpperCamelCase : Optional[int] = 0
if " " in self.encoder:
_UpperCamelCase : Optional[int] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
_UpperCamelCase : Optional[int] = self.encoder['''\n''']
del self.encoder["\n"]
_UpperCamelCase : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _snake_case : _snake_case[1] ) )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
_UpperCamelCase : int = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _lowercase ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case ))
| 683 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
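# Typical interactive use of the helpers exercised above (requires network access);
# the expected output mirrors the test parametrization:
# from datasets import get_dataset_split_names
# get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']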
| 683 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
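# Upstream, these helpers back the `accelerate env` CLI subcommand, which prints the
# version/platform report above plus the parsed default config for bug reports.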
| 683 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 683 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Optional[int] = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
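# The file above uses the standard lazy-import layout: public names are declared as
# strings in `_import_structure` and only resolved on first attribute access. A
# minimal sketch of the mechanism (not the transformers `_LazyModule` itself):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)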
| 683 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' ,['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' ,['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' ,[None, '''v2'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : Optional[Any] = hf_hub_url(repo_id=UpperCamelCase ,path=UpperCamelCase ,revision=UpperCamelCase )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(UpperCamelCase )}'''
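# The parametrized assertion above pins down the URL template; restated directly
# (quote() percent-escapes blanks and other unsafe characters in `path`):
from urllib.parse import quote

def hub_url(repo_id, path, revision=None):
    return f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"

print(hub_url("org-name/dataset-name", "filename with blanks.csv"))
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv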
| 683 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Dict = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : Tuple = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
def _lowercase ( self , _snake_case , _snake_case=None ) -> Optional[int]:
_UpperCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Optional[Any] = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
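# The two methods above implement BERT-style packing: [CLS] A [SEP] for one
# sequence, [CLS] A [SEP] B [SEP] for a pair, with token_type_ids 0 over the first
# segment (specials included) and 1 over the second. A hedged restatement of the
# mask arithmetic:
def bert_token_type_ids(len_a, len_b=0):
    ids = [0] * (1 + len_a + 1)      # [CLS] + A + [SEP]
    if len_b:
        ids += [1] * (len_b + 1)     # B + [SEP]
    return ids

print(bert_token_type_ids(3, 2))  # [0, 0, 0, 0, 0, 1, 1, 1]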
| 683 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : int = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
_UpperCamelCase : List[str] = True
for i in range(0 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : int = False
for i in range(1 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : Optional[int] = False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
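# A compact reference version of the same odd-even (brick) sort: alternate bubble
# passes over even- and odd-indexed pairs until a full sweep makes no swap.
# Worst case O(n^2) comparisons, like bubble sort.
def odd_even_sort_sketch(xs):
    swapped = True
    while swapped:
        swapped = False
        for parity in (0, 1):
            for i in range(parity, len(xs) - 1, 2):
                if xs[i] > xs[i + 1]:
                    xs[i], xs[i + 1] = xs[i + 1], xs[i]
                    swapped = True
    return xs

print(odd_even_sort_sketch([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]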
| 683 | 1 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
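# The retrieval helper above is max-inner-product search: embed the question, then
# one faiss lookup returns the nearest training examples. A self-contained sketch of
# that pattern (dimensions and data are illustrative; faiss must be installed):
import faiss
import numpy as np

dim = 128
index = faiss.IndexFlatIP(dim)                       # exact inner-product index
passages = np.random.rand(1000, dim).astype("float32")
index.add(passages)
query = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(query, 10)                # top-10 ids by dot product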
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = checkpoint
_UpperCamelCase : int = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
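# Summary of the conversion above: the stem tensors (conv_in/conv_out, norm_out and the
# quant/post_quant convs) are copied verbatim; the "down.{i}.block" / "up.{i}.block" resnet
# keys and the mid-block resnets and attention are remapped through renew_vae_resnet_paths /
# renew_vae_attention_paths before assign_to_checkpoint writes them into the diffusers
# layout, and conv_attn_to_linear converts the 1x1 attention conv weights to linear weights.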
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
    # Only supports SD v1 checkpoints
_UpperCamelCase : Tuple = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 683 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def snake_case__ ( UpperCamelCase ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' ,set() )
@pytest.fixture
def snake_case__ ( UpperCamelCase ) -> Tuple:
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case ) -> str:
_UpperCamelCase : Dict = metric_id
class UpperCAmelCase :
"""simple docstring"""
        A__ : Tuple = [MetricMock(metric_id ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def _lowercase ( self ) -> List[Any]:
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' ,HfhMock() )
@pytest.mark.parametrize(
'''func, args''' ,[(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
if "tmp_path" in args:
_UpperCamelCase : Any = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(UpperCamelCase ,match='''https://huggingface.co/docs/evaluate''' ):
func(*UpperCamelCase )
| 683 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 683 | 1 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def snake_case__ ( UpperCamelCase = "" ) -> dict[str, float]:
_UpperCamelCase : List[Any] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_UpperCamelCase : Dict = BeautifulSoup(requests.get(UpperCamelCase ).text ,'''html.parser''' )
_UpperCamelCase : int = soup.find_all('''td''' ,attrs='''titleColumn''' )
_UpperCamelCase : List[Any] = soup.find_all('''td''' ,class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(UpperCamelCase ,UpperCamelCase )
}
def snake_case__ ( UpperCamelCase = "IMDb_Top_250_Movies.csv" ) -> None:
_UpperCamelCase : Optional[int] = get_imdb_top_aaa_movies()
with open(UpperCamelCase ,'''w''' ,newline='''''' ) as out_file:
_UpperCamelCase : int = csv.writer(UpperCamelCase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 683 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if a box's height or width is lower than this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
            outfile.write('''\n'''.join(annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
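# Note on the coordinate math above: annotations are normalized to [0, 1], so each
# quadrant rescales its boxes into the corresponding sub-rectangle. For the top-right
# tile, for example, x' = scale_x + x * (1 - scale_x) maps x from [0, 1] into
# [scale_x, 1], while y is simply scaled by scale_y.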
def snake_case__ ( UpperCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 1 |
'''simple docstring'''
_UpperCAmelCase : Tuple = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 683 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 1 |
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : int = text, pattern
_UpperCamelCase, _UpperCamelCase : List[Any] = len(_snake_case ), len(_snake_case )
def _lowercase ( self , _snake_case ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowercase ( self , _snake_case ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowercase ( self ) -> list[int]:
# searches pattern in text and returns index positions
_UpperCamelCase : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
_UpperCamelCase : Optional[Any] = self.mismatch_in_text(_snake_case )
if mismatch_index == -1:
positions.append(_snake_case )
else:
_UpperCamelCase : Tuple = self.match_in_pattern(self.text[mismatch_index] )
_UpperCamelCase : Union[str, Any] = (
mismatch_index - match_index
                )  # shift the search window using the bad-character rule
return positions
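# Bad-character heuristic in brief: the pattern is compared against the text from right
# to left; on a mismatch at a text character c, the window is aligned with the rightmost
# occurrence of c in the pattern (match_in_pattern), or moved past c entirely if c never
# occurs. For the sample below (text "ABAABA", pattern "AB") the reported positions are [0, 3].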
_UpperCAmelCase : Optional[Any] = """ABAABA"""
_UpperCAmelCase : int = """AB"""
_UpperCAmelCase : Dict = BoyerMooreSearch(text, pattern)
_UpperCAmelCase : str = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 683 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 1 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
while a != 0:
_UpperCamelCase, _UpperCamelCase : Any = b % a, a
return b
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
if gcd(UpperCamelCase ,UpperCamelCase ) != 1:
_UpperCamelCase : List[Any] = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(UpperCamelCase )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = 1, 0, a
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Any = 0, 1, m
while va != 0:
_UpperCamelCase : Optional[Any] = ua // va
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
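# Worked example of the extended Euclidean routine above (using the pre-obfuscation
# names gcd/mod_inverse as an assumption): mod_inverse(3, 11) == 4, since
# 3 * 4 = 12 ≡ 1 (mod 11), while mod_inverse(2, 4) raises ValueError because gcd(2, 4) == 2.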
| 683 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
_UpperCamelCase : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Union[str, Any] = 'bloom'
A__ : Any = ['past_key_values']
A__ : Any = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self , _snake_case=250880 , _snake_case=64 , _snake_case=2 , _snake_case=8 , _snake_case=1E-5 , _snake_case=0.02 , _snake_case=True , _snake_case=1 , _snake_case=2 , _snake_case=False , _snake_case=0.0 , _snake_case=0.0 , _snake_case=1 , _snake_case=False , **_snake_case , ) -> List[str]:
_UpperCamelCase : Any = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCamelCase : Optional[Any] = kwargs.pop('''n_embed''' , _snake_case )
_UpperCamelCase : Dict = hidden_size if n_embed is None else n_embed
_UpperCamelCase : Dict = n_layer
_UpperCamelCase : str = n_head
_UpperCamelCase : int = layer_norm_epsilon
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = use_cache
_UpperCamelCase : Union[str, Any] = pretraining_tp
_UpperCamelCase : Any = apply_residual_connection_post_layernorm
_UpperCamelCase : Optional[Any] = hidden_dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : Dict = bos_token_id
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = slow_but_exact
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Any = version.parse('1.12' )
def __init__( self , _snake_case , _snake_case = "default" , _snake_case = None , _snake_case = False , ) -> Any:
super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case )
if not getattr(self._config , '''pad_token_id''' , _snake_case ):
# TODO: how to do that better?
_UpperCamelCase : List[Any] = 0
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Optional[int] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' , inverted_values_shape=_snake_case )
_UpperCamelCase : List[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_UpperCamelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _lowercase ( self ) -> int:
return self._config.n_layer
@property
def _lowercase ( self ) -> int:
return self._config.n_head
@property
def _lowercase ( self ) -> float:
return 1E-3
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = super(_snake_case , self ).generate_dummy_inputs(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
# We need to order the input in the way they appears in the forward()
_UpperCamelCase : Dict = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_UpperCamelCase : Tuple = seqlen + 2
_UpperCamelCase : Dict = self._config.hidden_size // self.num_attention_heads
_UpperCamelCase : List[Any] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCamelCase : Optional[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCamelCase : Tuple = [
(torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers )
]
_UpperCamelCase : str = common_inputs['''attention_mask''']
if self.use_past:
_UpperCamelCase : List[Any] = ordered_inputs['''attention_mask'''].dtype
_UpperCamelCase : Optional[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
return ordered_inputs
@property
def _lowercase ( self ) -> int:
return 13
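# Note on the dummy past_key_values above: BLOOM caches keys with shape
# (batch * num_heads, head_dim, past_seq_len) and values transposed as
# (batch * num_heads, past_seq_len, head_dim), which is why the ONNX config fills the
# past with two differently-shaped zero tensors and marks the values shape as inverted.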
| 683 |
'''simple docstring'''
_UpperCAmelCase : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def snake_case__ ( UpperCamelCase ) -> int:
_UpperCamelCase : Any = 0
while number:
        # Speeds things up slightly by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Every chain eventually falls into one of two cycles:
# one ends at 89, and seeding the cache with its member 58 first minimizes the
# number of iterations needed when the remaining members are checked;
# the other ends at 1 and contains only the single element 1.
# So 58 and 1 are the values declared at the start.
# The dictionary was changed to an array to speed up the solution.
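# Worked example of the two cycles: starting from 44 the chain is
# 44 -> 32 -> 13 -> 10 -> 1, while starting from 85 it is
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89,
# so every starting number eventually falls into the 1-cycle or the 89-cycle.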
_UpperCAmelCase : list[bool | None] = [None] * 10000000
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = False
def snake_case__ ( UpperCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) )
_UpperCamelCase : Tuple = number_chain
while number < 10_00_00_00:
_UpperCamelCase : int = number_chain
number *= 10
return number_chain
def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int:
for i in range(1 ,UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCAmelCase : Dict = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A__ : Optional[str] = field(
default=a_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A__ : Optional[str] = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
A__ : Optional[str] = field(
default=a_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A__ : bool = field(default=a_ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
A__ : Optional[str] = field(
default=a_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class UpperCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
A__ : Optional[str] = field(
default=a_ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
A__ : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A__ : bool = field(
default=a_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case__ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
_UpperCamelCase : List[Any] = import_module('''tasks''' )
try:
_UpperCamelCase : Dict = getattr(UpperCamelCase ,model_args.task_type )
_UpperCamelCase : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,UpperCamelCase )
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
_UpperCamelCase : Tuple = token_classification_task.get_labels(data_args.labels )
_UpperCamelCase : Dict[int, str] = dict(enumerate(UpperCamelCase ) )
_UpperCamelCase : Any = len(UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=UpperCamelCase ,idalabel=UpperCamelCase ,labelaid={label: i for i, label in enumerate(UpperCamelCase )} ,cache_dir=model_args.cache_dir ,)
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,)
_UpperCamelCase : Any = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=UpperCamelCase ,cache_dir=model_args.cache_dir ,)
# Get datasets
_UpperCamelCase : Optional[Any] = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase ,data_dir=data_args.data_dir ,tokenizer=UpperCamelCase ,labels=UpperCamelCase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_UpperCamelCase : Any = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase ,data_dir=data_args.data_dir ,tokenizer=UpperCamelCase ,labels=UpperCamelCase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def align_predictions(UpperCamelCase ,UpperCamelCase ) -> Tuple[List[int], List[int]]:
_UpperCamelCase : Dict = np.argmax(UpperCamelCase ,axis=2 )
_UpperCamelCase, _UpperCamelCase : int = preds.shape
_UpperCamelCase : List[str] = [[] for _ in range(UpperCamelCase )]
_UpperCamelCase : Dict = [[] for _ in range(UpperCamelCase )]
for i in range(UpperCamelCase ):
for j in range(UpperCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCamelCase ) -> Dict:
_UpperCamelCase, _UpperCamelCase : Any = align_predictions(p.predictions ,p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCamelCase ,UpperCamelCase ),
"precision": precision_score(UpperCamelCase ,UpperCamelCase ),
"recall": recall_score(UpperCamelCase ,UpperCamelCase ),
"f1": fa_score(UpperCamelCase ,UpperCamelCase ),
}
# Data collator
_UpperCamelCase : Tuple = DataCollatorWithPadding(UpperCamelCase ,pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase : Union[str, Any] = Trainer(
model=UpperCamelCase ,args=UpperCamelCase ,train_dataset=UpperCamelCase ,eval_dataset=UpperCamelCase ,compute_metrics=UpperCamelCase ,data_collator=UpperCamelCase ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase : Union[str, Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase : Union[str, Any] = trainer.evaluate()
_UpperCamelCase : Tuple = os.path.join(training_args.output_dir ,'''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase ,'''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' ,UpperCamelCase ,UpperCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCamelCase )
# Predict
if training_args.do_predict:
_UpperCamelCase : List[Any] = TokenClassificationDataset(
token_classification_task=UpperCamelCase ,data_dir=data_args.data_dir ,tokenizer=UpperCamelCase ,labels=UpperCamelCase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,)
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = trainer.predict(UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = align_predictions(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[int] = os.path.join(training_args.output_dir ,'''test_results.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase ,'''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' ,UpperCamelCase ,UpperCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
_UpperCamelCase : Union[str, Any] = os.path.join(training_args.output_dir ,'''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase ,'''w''' ) as writer:
with open(os.path.join(data_args.data_dir ,'''test.txt''' ) ,'''r''' ) as f:
token_classification_task.write_predictions_to_file(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
return results
def snake_case__ ( UpperCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 683 |
'''simple docstring'''
_UpperCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> str:
assert len(str(UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_UpperCamelCase : Any = year // 1_00
_UpperCamelCase : List[Any] = (5 * (century % 4) + 2) % 7
_UpperCamelCase : Tuple = year % 1_00
_UpperCamelCase : Optional[int] = centurian % 12
_UpperCamelCase : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCamelCase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCamelCase : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
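# Worked example (with the leap-year fix above, and assuming the first table is
# DOOMSDAY_LEAP): for 2024-01-01, century = 20, century_anchor = 2, dooms_day = 4
# (Thursday) and day_anchor = DOOMSDAY_LEAP[0] = 4, so week_day = (4 + 1 - 4) % 7 = 1,
# i.e. "Monday", which matches the calendar.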
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_UpperCAmelCase : Optional[Any] = """docs/source/en/_toctree.yml"""
def snake_case__ ( UpperCamelCase ) -> List[str]:
_UpperCamelCase : int = defaultdict(UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
_UpperCamelCase : str = [key for key, value in counts.items() if value > 1]
_UpperCamelCase : Union[str, Any] = []
for duplicate_key in duplicates:
_UpperCamelCase : str = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(UpperCamelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(UpperCamelCase ,key=lambda s : s["title"].lower() )
def snake_case__ ( UpperCamelCase=False ) -> Optional[Any]:
with open(UpperCamelCase ,encoding='''utf-8''' ) as f:
_UpperCamelCase : int = yaml.safe_load(f.read() )
# Get to the API doc
_UpperCamelCase : List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_UpperCamelCase : Optional[Any] = content[api_idx]['''sections''']
# Then to the model doc
_UpperCamelCase : Tuple = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_UpperCamelCase : Dict = api_doc[model_idx]['''sections''']
_UpperCamelCase : Any = [(idx, section) for idx, section in enumerate(UpperCamelCase ) if '''sections''' in section]
_UpperCamelCase : int = False
for idx, modality_doc in modalities_docs:
_UpperCamelCase : Optional[int] = modality_doc['''sections''']
_UpperCamelCase : Optional[int] = clean_model_doc_toc(UpperCamelCase )
if old_modality_doc != new_modality_doc:
_UpperCamelCase : str = True
if overwrite:
_UpperCamelCase : Union[str, Any] = new_modality_doc
if diff:
if overwrite:
_UpperCamelCase : Union[str, Any] = model_doc
_UpperCamelCase : List[str] = api_doc
with open(UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(yaml.dump(UpperCamelCase ,allow_unicode=UpperCamelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_UpperCAmelCase : Tuple = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
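# Hedged usage sketch of the dedup/sort helper above (toy input, hypothetical names):
# clean_model_doc_toc([
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/albert", "title": "ALBERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
# ])
# -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]
# Duplicate "local" entries are collapsed (identical titles required), then the list is
# sorted case-insensitively by title.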
| 683 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
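# Hedged usage sketch of the pipeline exercised above, mirroring the slow test's expectations:
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#     question="How many cats are there?", top_k=2)
# -> [{"score": ~0.88, "answer": "2"}, {"score": ~0.30, "answer": "1"}]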
| 683 | 1 |
'''simple docstring'''
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = "" , _snake_case = False ) -> None:
# Mapping from the first character of the prefix of the node
_UpperCamelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
_UpperCamelCase : Any = is_leaf
_UpperCamelCase : Optional[Any] = prefix
def _lowercase ( self , _snake_case ) -> tuple[str, str, str]:
_UpperCamelCase : Optional[int] = 0
for q, w in zip(self.prefix , _snake_case ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _lowercase ( self , _snake_case ) -> None:
for word in words:
self.insert(_snake_case )
def _lowercase ( self , _snake_case ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
_UpperCamelCase : int = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_UpperCamelCase : Tuple = RadixNode(prefix=_snake_case , is_leaf=_snake_case )
else:
_UpperCamelCase : Dict = self.nodes[word[0]]
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Any = incoming_node.match(
_snake_case )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_snake_case )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_UpperCamelCase : Union[str, Any] = remaining_prefix
_UpperCamelCase : Union[str, Any] = self.nodes[matching_string[0]]
_UpperCamelCase : Optional[Any] = RadixNode(_snake_case , _snake_case )
_UpperCamelCase : str = aux_node
if remaining_word == "":
_UpperCamelCase : str = True
else:
self.nodes[matching_string[0]].insert(_snake_case )
def _lowercase ( self , _snake_case ) -> bool:
_UpperCamelCase : List[str] = self.nodes.get(word[0] , _snake_case )
if not incoming_node:
return False
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Union[str, Any] = incoming_node.match(
_snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_snake_case )
def _lowercase ( self , _snake_case ) -> bool:
_UpperCamelCase : Tuple = self.nodes.get(word[0] , _snake_case )
if not incoming_node:
return False
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = incoming_node.match(
_snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_snake_case )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_UpperCamelCase : List[str] = list(self.nodes.values() )[0]
_UpperCamelCase : int = merging_node.is_leaf
self.prefix += merging_node.prefix
_UpperCamelCase : str = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_UpperCamelCase : str = False
# If there is 1 edge, we merge it with its child
else:
_UpperCamelCase : List[Any] = list(incoming_node.nodes.values() )[0]
_UpperCamelCase : str = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_UpperCamelCase : List[str] = merging_node.nodes
return True
def _lowercase ( self , _snake_case = 0 ) -> None:
if self.prefix != "":
print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def snake_case__ ( ) -> bool:
_UpperCamelCase : Dict = '''banana bananas bandana band apple all beast'''.split()
_UpperCamelCase : Union[str, Any] = RadixNode()
root.insert_many(UpperCamelCase )
assert all(root.find(UpperCamelCase ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def snake_case__ ( ) -> None:
assert test_trie()
def snake_case__ ( ) -> None:
_UpperCamelCase : str = RadixNode()
_UpperCamelCase : Optional[int] = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(UpperCamelCase )
print('''Words:''' ,UpperCamelCase )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
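# Hedged worked example of the prefix-splitting helper (the match method, first _lowercase):
# RadixNode(prefix="banana").match("bandana") walks both strings in lockstep, stops at the
# first mismatch (index 3), and returns ("ban", "ana", "dana"):
# the common prefix, the remaining node prefix, and the remaining word.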
| 683 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 1 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_UpperCAmelCase : Optional[Any] = parser.parse_args()
_UpperCAmelCase : List[Any] = """cpu"""
_UpperCAmelCase : int = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_UpperCAmelCase : Any = """path-to-your-trained-model"""
_UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_UpperCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_UpperCAmelCase : Union[str, Any] = pipe.to(device)
# to channels last
_UpperCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_UpperCAmelCase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_UpperCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_UpperCAmelCase : int = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_UpperCAmelCase : Dict = torch.randn(2, 4, 64, 64)
_UpperCAmelCase : Optional[Any] = torch.rand(1) * 999
_UpperCAmelCase : Optional[Any] = torch.randn(2, 77, 768)
_UpperCAmelCase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_UpperCAmelCase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_UpperCAmelCase : List[str] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCAmelCase : Tuple = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCAmelCase : Dict = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_UpperCAmelCase : Any = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_UpperCAmelCase : int = 666
_UpperCAmelCase : Any = torch.Generator(device).manual_seed(seed)
_UpperCAmelCase : Dict = {"""generator""": generator}
if args.steps is not None:
_UpperCAmelCase : Optional[Any] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_UpperCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
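# Hedged note: passing sample_input above lets ipex.optimize trace the UNet with
# representative tensor shapes (latents, timestep, text-encoder hidden states); the except
# branch falls back to shape-agnostic optimization when that tracing path is unavailable.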
| 683 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
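# Hedged illustration (not part of the conversion script): T5X stores attention kernels as
# (d_model, num_heads, head_dim); tax_attention_lookup merges the last two axes into the
# (d_model, num_heads * head_dim) matrix PyTorch expects, and the caller transposes it.
# Shapes below are assumed for demonstration only.
import numpy as np

k_tmp = np.zeros((512, 8, 64))  # (d_model, num_heads, head_dim)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
assert k.shape == (512, 512)  # ready to be transposed into a torch Linear weight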
| 683 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : int = 'gpt_neox'
def __init__( self , _snake_case=50432 , _snake_case=6144 , _snake_case=44 , _snake_case=64 , _snake_case=24576 , _snake_case="gelu" , _snake_case=0.25 , _snake_case=10000 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=2048 , _snake_case=0.02 , _snake_case=1E-5 , _snake_case=True , _snake_case=0 , _snake_case=2 , _snake_case=False , _snake_case=True , _snake_case=None , **_snake_case , ) -> Any:
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : Any = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : int = num_attention_heads
_UpperCamelCase : Any = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_act
_UpperCamelCase : Optional[Any] = rotary_pct
_UpperCamelCase : Dict = rotary_emb_base
_UpperCamelCase : Optional[int] = attention_dropout
_UpperCamelCase : Tuple = hidden_dropout
_UpperCamelCase : Optional[int] = classifier_dropout
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : str = layer_norm_eps
_UpperCamelCase : str = use_cache
_UpperCamelCase : str = tie_word_embeddings
_UpperCamelCase : Optional[Any] = use_parallel_residual
_UpperCamelCase : Tuple = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
            '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def _lowercase ( self ) -> Union[str, Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F'''got {self.rope_scaling}''' )
_UpperCamelCase : List[str] = self.rope_scaling.get('''type''' , _snake_case )
_UpperCamelCase : Any = self.rope_scaling.get('''factor''' , _snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_snake_case , _snake_case ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
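# Hedged usage sketch (assumes this class corresponds to transformers' GPTNeoXConfig):
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0}) passes the validation above,
# while {"type": "xpos", "factor": 2.0} or any factor <= 1.0 raises ValueError.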
| 683 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_UpperCAmelCase : int = 100
_UpperCAmelCase : List[Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_UpperCAmelCase : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def snake_case__ ( UpperCamelCase ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase : set[int] = set()
_UpperCamelCase : int
_UpperCamelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def snake_case__ ( UpperCamelCase = 50_00 ) -> int | None:
for number_to_partition in range(1 ,UpperCamelCase ):
if len(partition(UpperCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 1 |
'''simple docstring'''
from manim import *
class UpperCAmelCase ( a_ ):
"""simple docstring"""
def _lowercase ( self ) -> str:
_UpperCamelCase : List[Any] = Rectangle(height=0.5 , width=0.5 )
_UpperCamelCase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCamelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_UpperCamelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_UpperCamelCase : int = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCamelCase : Optional[Any] = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCamelCase : Optional[int] = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
_UpperCamelCase : Dict = Text('''CPU''' , font_size=24 )
_UpperCamelCase : Optional[Any] = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
_UpperCamelCase : int = [mem.copy() for i in range(4 )]
_UpperCamelCase : str = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCamelCase : str = Text('''GPU''' , font_size=24 )
_UpperCamelCase : str = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.move_to([-1, -1, 0] )
self.add(_snake_case )
_UpperCamelCase : Optional[int] = [mem.copy() for i in range(6 )]
_UpperCamelCase : str = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCamelCase : int = Text('''Model''' , font_size=24 )
_UpperCamelCase : List[Any] = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.add(_snake_case )
_UpperCamelCase : Optional[int] = []
for i, rect in enumerate(_snake_case ):
rect.set_stroke(_snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCamelCase : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_snake_case , buff=0.0 )
self.add(_snake_case )
cpu_targs.append(_snake_case )
_UpperCamelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCamelCase : int = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCamelCase : Dict = Text('''Loaded Checkpoint''' , font_size=24 )
_UpperCamelCase : Union[str, Any] = Group(_snake_case , _snake_case ).arrange(_snake_case , aligned_edge=_snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCamelCase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCamelCase : Tuple = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_snake_case , _snake_case )
_UpperCamelCase : List[str] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCamelCase : List[str] = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case ) , Write(_snake_case ) )
self.play(Write(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) )
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : List[Any] = []
for i, rect in enumerate(_snake_case ):
_UpperCamelCase : Union[str, Any] = fill.copy().set_fill(_snake_case , opacity=0.7 )
target.move_to(_snake_case )
first_animations.append(GrowFromCenter(_snake_case , run_time=1 ) )
_UpperCamelCase : str = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
| 683 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
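# Hedged usage sketch: make_support (the snake_case__ above) concatenates the retrieved
# passages into the seq2seq input string, e.g.
# question_doc, support = make_support("How do jellyfish function?", source="wiki40b", method="dense")
# question_doc -> "question: How do jellyfish function? context: <P> ... <P> ..."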
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase : Dict = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
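# Hedged note: with the _LazyModule pattern above, submodules are imported only on first
# attribute access, so e.g. `from transformers import GPTNeoXJapaneseConfig` does not pull
# in the torch-dependent modeling file until a modeling class is actually requested.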
| 683 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
_UpperCamelCase : Node | None = None # Added in order to delete a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
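# Hedged worked example of the k-th smallest helper (the last _lowercase method, assumed to
# be kth_smallest): for the testlist (8, 3, 6, 1, 10, 14, 13, 4, 7) the inorder sequence is
# [1, 3, 4, 6, 7, 8, 10, 13, 14], so t.kth_smallest(t.root, 3) -> 4.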
| 683 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Dict = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : int = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
_UpperCAmelCase : List[str] = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Optional[Any] = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[Any] = SqueezeBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> Optional[int]:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : Optional[int] = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Tuple = strip_accents
_UpperCamelCase : str = tokenize_chinese_chars
_UpperCamelCase : str = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
def _lowercase ( self , _snake_case , _snake_case=None ) -> str:
_UpperCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : str = [self.sep_token_id]
_UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Dict = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
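# A minimal, self-contained sketch of the BERT-style special-token layout that the
# two methods above implement; the 101/102 ids are illustrative stand-ins for the
# real [CLS]/[SEP] vocabulary ids, not values taken from this tokenizer.
def build_pair_with_special_tokens(ids_a, ids_b=None, cls_id=101, sep_id=102):
    # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
    output = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        output += ids_b + [sep_id]
    return output
def pair_token_type_ids(ids_a, ids_b=None):
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    first_len = len(ids_a) + 2
    if ids_b is None:
        return [0] * first_len
    return [0] * first_len + [1] * (len(ids_b) + 1)
assert build_pair_with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert pair_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]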
| 683 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'whisper'
A__ : Tuple = ['past_key_values']
A__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _snake_case=51865 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=1536 , _snake_case=1536 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=50257 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=256 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=1500 , _snake_case=448 , _snake_case=50256 , _snake_case=50256 , _snake_case=50256 , _snake_case=None , _snake_case=[220, 50256] , _snake_case=False , _snake_case=256 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ) -> Any:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = num_mel_bins
_UpperCamelCase : List[str] = d_model
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Optional[int] = encoder_attention_heads
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[int] = encoder_ffn_dim
_UpperCamelCase : Any = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : List[Any] = init_std
_UpperCamelCase : Optional[int] = encoder_layerdrop
_UpperCamelCase : str = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : List[str] = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : int = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : int = mask_time_length
_UpperCamelCase : List[Any] = mask_time_min_masks
_UpperCamelCase : List[str] = mask_feature_prob
_UpperCamelCase : Optional[int] = mask_feature_length
_UpperCamelCase : Union[str, Any] = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Dict = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase : Tuple = {0: '''batch'''}
else:
_UpperCamelCase : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
return common_inputs
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 22050 , _snake_case = 5.0 , _snake_case = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = OrderedDict()
_UpperCamelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
_UpperCamelCase : int = encoder_inputs['''input_features'''].shape[2]
_UpperCamelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : str = super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = encoder_inputs.pop('''input_features''' )
_UpperCamelCase : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : List[str] = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowercase ( self ) -> float:
return 1E-3
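# A small sketch (with assumed key names, not the transformers API) of how the
# dynamic-axes mapping above changes once cached past key/values are enabled: the
# decoder then consumes one new token per step, so only its batch axis stays dynamic.
from collections import OrderedDict
def whisper_onnx_dynamic_axes(use_past):
    inputs = OrderedDict(
        [("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"})]
    )
    inputs["decoder_input_ids"] = {0: "batch"} if use_past else {0: "batch", 1: "decoder_sequence"}
    return inputs
assert whisper_onnx_dynamic_axes(True)["decoder_input_ids"] == {0: "batch"}
assert whisper_onnx_dynamic_axes(False)["decoder_input_ids"][1] == "decoder_sequence"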
| 683 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[Any] = DebertaTokenizer
A__ : Any = True
A__ : Union[str, Any] = DebertaTokenizerFast
def _lowercase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
_UpperCamelCase : Optional[int] = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
_UpperCamelCase : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_UpperCamelCase : str = {'''unk_token''': '''[UNK]'''}
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_snake_case ) )
def _lowercase ( self , **_snake_case ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _lowercase ( self , _snake_case ) -> Any:
_UpperCamelCase : Any = '''lower newer'''
_UpperCamelCase : List[Any] = '''lower newer'''
return input_text, output_text
def _lowercase ( self ) -> List[str]:
_UpperCamelCase : Tuple = self.get_tokenizer()
_UpperCamelCase : Tuple = '''lower newer'''
_UpperCamelCase : Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_UpperCamelCase : Tuple = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_UpperCamelCase : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _lowercase ( self ) -> Optional[Any]:
_UpperCamelCase : str = self.get_tokenizer()
_UpperCamelCase : Union[str, Any] = tokenizer('''Hello''' , '''World''' )
_UpperCamelCase : Tuple = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , _snake_case )
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
_UpperCamelCase : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=_snake_case )
_UpperCamelCase : Optional[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_snake_case )
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
_UpperCamelCase : List[str] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
_UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(_snake_case )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Dict = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_UpperCamelCase : int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
_UpperCamelCase : Optional[Any] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
_UpperCamelCase : List[str] = tokenizer(_snake_case , padding=_snake_case )
_UpperCamelCase : Union[str, Any] = [tokenizer.decode(_snake_case , skip_special_tokens=_snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
_UpperCamelCase : Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , _snake_case )
for expected, decoded in zip(_snake_case , _snake_case ):
self.assertEqual(_snake_case , _snake_case )
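# A toy sketch of how the merge rules in the fixture above assemble subwords; this is
# hand-rolled greedy byte-pair merging, applied rule by rule, not the real tokenizer.
def apply_bpe_merges(word, merges):
    symbols = list(word)
    for a, b in merges:  # merges are tried in the priority order of the fixture
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols
toy_merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]
# "\u0120lower" (space marker + "lower") collapses to the vocab entries "\u0120low" + "er"
assert apply_bpe_merges("\u0120lower", toy_merges) == ["\u0120low", "er"]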
| 683 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
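# A compact sketch of the layer-selection idea above: teacher layers [0, 2, 4, 7, 9, 11]
# are renumbered 0..5 in the student state dict. The key pattern is illustrative.
def map_teacher_to_student(state_dict, teacher_layers=(0, 2, 4, 7, 9, 11)):
    student_sd = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        prefix = f"encoder.layer.{teacher_idx}."
        for key, value in state_dict.items():
            if key.startswith(prefix):
                student_sd[f"encoder.layer.{std_idx}." + key[len(prefix):]] = value
    return student_sd
teacher_sd = {f"encoder.layer.{i}.weight": i for i in range(12)}
assert map_teacher_to_student(teacher_sd)["encoder.layer.5.weight"] == 11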
| 683 | 1 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_UpperCAmelCase : Union[str, Any] = HfApi()
_UpperCAmelCase : str = {}
# fmt: off
_UpperCAmelCase : Tuple = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_UpperCAmelCase : str = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_UpperCAmelCase : Optional[int] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_UpperCAmelCase : Optional[int] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_UpperCAmelCase : List[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_UpperCAmelCase : int = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_UpperCAmelCase : str = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_UpperCAmelCase : Optional[int] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_UpperCAmelCase : List[str] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_UpperCAmelCase : Union[str, Any] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_UpperCAmelCase : List[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_UpperCAmelCase : Optional[Any] = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_UpperCAmelCase : List[Any] = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("""CompVis"""):
_UpperCAmelCase : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
_UpperCAmelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_UpperCAmelCase : Dict = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_UpperCAmelCase : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 683 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
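# A numpy sketch of the top-k warp exercised above: every logit outside the k largest
# per row is replaced by -inf, so softmax mass concentrates on those k tokens.
import numpy as np
def top_k_filter(logits, k, filter_value=-np.inf):
    out = np.full_like(logits, filter_value, dtype=float)
    top = np.argsort(logits, axis=-1)[:, -k:]     # indices of the k largest per row
    rows = np.arange(logits.shape[0])[:, None]
    out[rows, top] = logits[rows, top]
    return out
ramp = np.arange(10, dtype=float)[None, :]        # the same ramp shape the test builds
filtered = top_k_filter(ramp, k=3)
assert np.isinf(filtered[0, :7]).all() and (filtered[0, 7:] == [7.0, 8.0, 9.0]).all()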
| 683 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> str | Literal[False]:
_UpperCamelCase : Optional[Any] = list(UpperCamelCase )
_UpperCamelCase : List[Any] = list(UpperCamelCase )
_UpperCamelCase : Any = 0
for i in range(len(UpperCamelCase ) ):
if lista[i] != lista[i]:
count += 1
_UpperCamelCase : List[Any] = '''_'''
if count > 1:
return False
else:
return "".join(UpperCamelCase )
def snake_case__ ( UpperCamelCase ) -> list[str]:
_UpperCamelCase : List[str] = []
while True:
_UpperCamelCase : int = ['''$'''] * len(UpperCamelCase )
_UpperCamelCase : Optional[Any] = []
for i in range(len(UpperCamelCase ) ):
for j in range(i + 1 ,len(UpperCamelCase ) ):
_UpperCamelCase : Any = compare_string(binary[i] ,binary[j] )
if k is False:
_UpperCamelCase : str = '''*'''
_UpperCamelCase : str = '''*'''
temp.append('''X''' )
for i in range(len(UpperCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(UpperCamelCase ) == 0:
return pi
_UpperCamelCase : Optional[int] = list(set(UpperCamelCase ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> list[str]:
_UpperCamelCase : str = []
for minterm in minterms:
_UpperCamelCase : int = ''''''
for _ in range(UpperCamelCase ):
_UpperCamelCase : int = str(minterm % 2 ) + string
minterm //= 2
temp.append(UpperCamelCase )
return temp
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> bool:
_UpperCamelCase : Optional[int] = list(UpperCamelCase )
_UpperCamelCase : List[str] = list(UpperCamelCase )
_UpperCamelCase : List[Any] = 0
for i in range(len(UpperCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> list[str]:
_UpperCamelCase : Dict = []
_UpperCamelCase : List[str] = [0] * len(UpperCamelCase )
for i in range(len(chart[0] ) ):
_UpperCamelCase : Tuple = 0
_UpperCamelCase : List[str] = -1
for j in range(len(UpperCamelCase ) ):
if chart[j][i] == 1:
count += 1
_UpperCamelCase : Union[str, Any] = j
if count == 1:
_UpperCamelCase : str = 1
for i in range(len(UpperCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase ) ):
_UpperCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Tuple = -1
_UpperCamelCase : Optional[Any] = 0
for i in range(len(UpperCamelCase ) ):
_UpperCamelCase : Tuple = chart[i].count(1 )
if count_n > max_n:
_UpperCamelCase : List[str] = count_n
_UpperCamelCase : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase ) ):
_UpperCamelCase : Optional[Any] = 0
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> list[list[int]]:
_UpperCamelCase : List[Any] = [[0 for x in range(len(UpperCamelCase ) )] for x in range(len(UpperCamelCase ) )]
for i in range(len(UpperCamelCase ) ):
_UpperCamelCase : Tuple = prime_implicants[i].count('''_''' )
for j in range(len(UpperCamelCase ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,UpperCamelCase ):
_UpperCamelCase : List[str] = 1
return chart
def snake_case__ ( ) -> None:
_UpperCamelCase : str = int(input('''Enter the no. of variables\n''' ) )
_UpperCamelCase : Optional[int] = [
float(UpperCamelCase )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
_UpperCamelCase : Optional[int] = decimal_to_binary(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : int = check(UpperCamelCase )
print('''Prime Implicants are:''' )
print(UpperCamelCase )
_UpperCamelCase : int = prime_implicant_chart(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = selection(UpperCamelCase ,UpperCamelCase )
print('''Essential Prime Implicants are:''' )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
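# A clean, runnable sketch of the two Quine-McCluskey building blocks above: merge two
# implicants that differ in exactly one bit, and render minterms as fixed-width binary.
def merge_implicants(a, b):
    diffs = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diffs) != 1:
        return None                                # combinable only on a one-bit difference
    i = diffs[0]
    return a[:i] + "_" + a[i + 1 :]
def to_binary(minterm, n_bits):
    return format(minterm, f"0{n_bits}b")
assert merge_implicants("0110", "0100") == "01_0"
assert merge_implicants("0110", "1001") is None
assert to_binary(5, 4) == "0101"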
| 683 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
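# Typical interactive use of the helpers exercised above; the expected values mirror
# the parametrizations in this module. Requires network access, so this is a sketch
# rather than a test meant for CI.
from datasets import get_dataset_config_names, get_dataset_split_names
def show_paws_layout():
    configs = get_dataset_config_names("paws")
    splits = get_dataset_split_names("paws", config_name="labeled_final")
    assert "labeled_final" in configs
    assert splits == ["train", "test", "validation"]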
| 683 | 1 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
_UpperCamelCase : List[str] = True
for i in range(0 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : int = False
for i in range(1 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : Optional[int] = False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
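# A de-obfuscated, runnable version of the odd-even transposition sort above:
# alternate sweeps over even and odd index pairs until a full pass makes no swap.
def odd_even_transposition(seq):
    data = list(seq)
    done = False
    while not done:
        done = True
        for start in (0, 1):                       # even-indexed pass, then odd-indexed pass
            for i in range(start, len(data) - 1, 2):
                if data[i] > data[i + 1]:
                    data[i], data[i + 1] = data[i + 1], data[i]
                    done = False
    return data
assert odd_even_transposition([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]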
| 683 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
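# The shape arithmetic asserted above, in isolation: a mel image that is `width` STFT
# frames wide decodes to (width - 1) * hop_length audio samples. The 512 hop length is
# an assumed default here, not read from the pipeline.
def expected_audio_samples(image_width, hop_length=512):
    return (image_width - 1) * hop_length
assert expected_audio_samples(64) == 63 * 512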
| 683 | 1 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ) -> str:
_UpperCamelCase : str = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case__ ( UpperCamelCase ) -> dict[str, str]:
_UpperCamelCase : Any = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_UpperCamelCase : Union[str, Any] = remove_duplicates(key.upper() )
_UpperCamelCase : int = len(UpperCamelCase )
# First fill cipher with key characters
_UpperCamelCase : Dict = {alphabet[i]: char for i, char in enumerate(UpperCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(UpperCamelCase ) ,26 ):
_UpperCamelCase : List[Any] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_UpperCamelCase : List[str] = alphabet[i - offset]
_UpperCamelCase : Any = char
return cipher_alphabet
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> str:
return "".join(cipher_map.get(UpperCamelCase ,UpperCamelCase ) for ch in message.upper() )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> str:
_UpperCamelCase : int = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(UpperCamelCase ,UpperCamelCase ) for ch in message.upper() )
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = input('''Enter message to encode or decode: ''' ).strip()
_UpperCamelCase : List[Any] = input('''Enter keyword: ''' ).strip()
_UpperCamelCase : int = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
_UpperCamelCase : int = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
_UpperCamelCase : List[Any] = create_cipher_map(UpperCamelCase )
print(func(UpperCamelCase ,UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
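# A condensed, runnable sketch of a keyword substitution cipher like the one above.
# Note the fill strategy differs: remaining letters are appended in plain A-Z order
# rather than via the offset bookkeeping used by create_cipher_map.
import string
def keyed_alphabet(key):
    seen = dict.fromkeys(ch for ch in key.upper() if ch.isalpha())
    rest = [c for c in string.ascii_uppercase if c not in seen]
    return "".join(seen) + "".join(rest)
def encipher_msg(msg, key):
    return msg.upper().translate(str.maketrans(string.ascii_uppercase, keyed_alphabet(key)))
def decipher_msg(msg, key):
    return msg.upper().translate(str.maketrans(keyed_alphabet(key), string.ascii_uppercase))
assert decipher_msg(encipher_msg("Hello World", "marvin"), "marvin") == "HELLO WORLD"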
| 683 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
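# A minimal sketch of the lazy-import pattern that _LazyModule implements above:
# attribute access triggers the real import, so heavy backends are resolved only
# when a name from them is actually used.
import importlib
class LazyNamespace:
    def __init__(self, import_structure):
        # import_structure maps module name -> list of names it exports
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }
    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(name)
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)
ns = LazyNamespace({"json": ["loads"]})
assert ns.loads("[1, 2]") == [1, 2]   # the json module is resolved on first access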
| 683 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = self.dummy_uncond_unet
_UpperCamelCase : int = PNDMScheduler()
_UpperCamelCase : List[str] = PNDMPipeline(unet=_snake_case , scheduler=_snake_case )
pndm.to(_snake_case )
pndm.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCamelCase : int = pndm(generator=_snake_case , num_inference_steps=20 , output_type='''numpy''' ).images
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Any = pndm(generator=_snake_case , num_inference_steps=20 , output_type='''numpy''' , return_dict=_snake_case )[0]
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Any = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> Dict:
_UpperCamelCase : int = '''google/ddpm-cifar10-32'''
_UpperCamelCase : Any = UNetaDModel.from_pretrained(_snake_case )
_UpperCamelCase : Union[str, Any] = PNDMScheduler()
_UpperCamelCase : Tuple = PNDMPipeline(unet=_snake_case , scheduler=_snake_case )
pndm.to(_snake_case )
pndm.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCamelCase : Any = pndm(generator=_snake_case , output_type='''numpy''' ).images
_UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Any = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 683 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Dict = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : Tuple = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
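    # Builds model inputs by adding the [CLS] and [SEP] special tokens around one or two sequences.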
def _lowercase ( self , _snake_case , _snake_case=None ) -> Optional[int]:
_UpperCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
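    # Creates token type IDs: zeros for the first sequence (with its specials) and ones for the second.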
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
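    # Saves the tokenizer's vocabulary files to the given directory.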
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Optional[Any] = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 683 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = KandinskyVaaControlnetImgaImgPipeline
A__ : str = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
A__ : str = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
A__ : Tuple = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A__ : int = False
@property
def _lowercase ( self ) -> Optional[int]:
return 32
@property
def _lowercase ( self ) -> Union[str, Any]:
return 32
@property
def _lowercase ( self ) -> str:
return self.time_input_dim
@property
def _lowercase ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def _lowercase ( self ) -> Any:
return 100
@property
def _lowercase ( self ) -> Any:
torch.manual_seed(0 )
_UpperCamelCase : Tuple = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_UpperCamelCase : Any = UNetaDConditionModel(**_snake_case )
return model
@property
def _lowercase ( self ) -> Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCamelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self ) -> Optional[Any]:
_UpperCamelCase : Any = self.dummy_unet
_UpperCamelCase : Optional[Any] = self.dummy_movq
_UpperCamelCase : str = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_UpperCamelCase : Optional[Any] = DDIMScheduler(**_snake_case )
_UpperCamelCase : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase ( self , _snake_case , _snake_case=0 ) -> int:
_UpperCamelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_snake_case ) ).to(_snake_case )
_UpperCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_snake_case )
# create init_image
_UpperCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
_UpperCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase : int = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
_UpperCamelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith('''mps''' ):
_UpperCamelCase : Union[str, Any] = torch.manual_seed(_snake_case )
else:
_UpperCamelCase : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_UpperCamelCase : List[str] = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[Any] = '''cpu'''
_UpperCamelCase : List[Any] = self.get_dummy_components()
_UpperCamelCase : List[Any] = self.pipeline_class(**_snake_case )
_UpperCamelCase : int = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Any = pipe(**self.get_dummy_inputs(_snake_case ) )
_UpperCamelCase : Tuple = output.images
_UpperCamelCase : List[Any] = pipe(
**self.get_dummy_inputs(_snake_case ) , return_dict=_snake_case , )[0]
_UpperCamelCase : str = image[0, -3:, -3:, -1]
_UpperCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : Any = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
_UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_UpperCamelCase : List[Any] = init_image.resize((512, 512) )
_UpperCamelCase : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
_UpperCamelCase : int = torch.from_numpy(np.array(_snake_case ) ).float() / 255.0
_UpperCamelCase : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_UpperCamelCase : Dict = '''A robot, 4k photo'''
_UpperCamelCase : Tuple = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
_UpperCamelCase : int = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
_UpperCamelCase : Dict = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase, _UpperCamelCase : Any = pipe_prior(
_snake_case , image=_snake_case , strength=0.85 , generator=_snake_case , negative_prompt='''''' , ).to_tuple()
_UpperCamelCase : Optional[int] = pipeline(
image=_snake_case , image_embeds=_snake_case , negative_image_embeds=_snake_case , hint=_snake_case , generator=_snake_case , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
_UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_snake_case , _snake_case )
| 683 |
'''simple docstring'''
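# Odd-even transposition (brick) sort: alternate passes over even and odd index pairs until no swaps occur.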
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Any = False
    while is_sorted is False: # Keep looping until a full pass makes no swaps
_UpperCamelCase : List[str] = True
for i in range(0 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : int = False
for i in range(1 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : Optional[int] = False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 683 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase : Any = TypeVar("""T""")
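# A node in a singly linked list, holding one stack element.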
class UpperCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self , _snake_case ) -> List[Any]:
_UpperCamelCase : int = data
_UpperCamelCase : Node[T] | None = None
def __str__( self ) -> str:
return F'''{self.data}'''
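# LIFO stack implemented as a singly linked list, with the top as the head node.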
class UpperCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
_UpperCamelCase : Node[T] | None = None
def __iter__( self ) -> Iterator[T]:
_UpperCamelCase : List[Any] = self.top
while node:
yield node.data
_UpperCamelCase : Dict = node.next
def __str__( self ) -> str:
return "->".join([str(_snake_case ) for item in self] )
def __len__( self ) -> int:
return len(tuple(iter(self ) ) )
def _lowercase ( self ) -> bool:
return self.top is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : Optional[Any] = Node(_snake_case )
if not self.is_empty():
_UpperCamelCase : Tuple = self.top
_UpperCamelCase : Dict = node
def _lowercase ( self ) -> T:
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , _snake_case )
_UpperCamelCase : int = self.top
_UpperCamelCase : List[str] = self.top.next
return pop_node.data
def _lowercase ( self ) -> T:
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def _lowercase ( self ) -> None:
_UpperCamelCase : str = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 683 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
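# Maps an LDM VAE state dict onto the diffusers AutoencoderKL key layout (down/up blocks, mid resnets, attention).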
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = checkpoint
_UpperCamelCase : int = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
    # Only supports the Stable Diffusion v1 inference config
    _UpperCamelCase : Tuple = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 683 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_UpperCAmelCase : Any = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
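    # Tokenizes text and/or preprocesses images; when both are given, the image pixel values are merged into the text encoding.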
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 683 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : str = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # range for the random mosaic split point
_UpperCAmelCase : Optional[Any] = 1 / 100 # if a box's height or width is lower than this scale, drop it
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
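# Builds mosaic images from four randomly chosen source images and writes each stitched image with YOLO-format labels.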
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
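# Collects image paths and per-image bounding boxes ([class, xmin, ymin, xmax, ymax]) from YOLO label files.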
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
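# Stitches four images into one mosaic at a random split point, rescaling each image's annotations to the new layout.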
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
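# Generates a random string of lowercase letters and digits of the requested length.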
def snake_case__ ( UpperCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
_UpperCAmelCase : List[str] = """src/transformers"""
# Matches is_xxx_available()
_UpperCAmelCase : str = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_UpperCAmelCase : Any = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_UpperCAmelCase : str = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_UpperCAmelCase : str = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_UpperCAmelCase : Optional[Any] = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_UpperCAmelCase : List[Any] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_UpperCAmelCase : Optional[Any] = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_UpperCAmelCase : Any = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_UpperCAmelCase : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_UpperCAmelCase : List[Any] = re.compile(R"""^\s*try:""")
# Catches a line with else:
_UpperCAmelCase : List[Any] = re.compile(R"""^\s*else:""")
def snake_case__ ( UpperCamelCase ) -> List[str]:
if _re_test_backend.search(UpperCamelCase ) is None:
return None
_UpperCamelCase : List[str] = [b[0] for b in _re_backend.findall(UpperCamelCase )]
backends.sort()
return "_and_".join(UpperCamelCase )
def snake_case__ ( UpperCamelCase ) -> Optional[Any]:
with open(UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_UpperCamelCase : int = f.readlines()
_UpperCamelCase : str = 0
while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCamelCase : Optional[int] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_UpperCamelCase : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0]
_UpperCamelCase : List[str] = re.findall(r'''\[([^\]]+)\]''' ,UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_UpperCamelCase : int = _re_import_struct_key_value.search(UpperCamelCase )
if single_line_import_search is not None:
_UpperCamelCase : int = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_UpperCamelCase : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all associated objects.
_UpperCamelCase : Union[str, Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCamelCase : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCamelCase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_UpperCamelCase : List[str] = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase ) is not None:
_UpperCamelCase : Optional[Any] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' )
_UpperCamelCase : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_between_brackets.search(UpperCamelCase ) is not None:
_UpperCamelCase : str = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' )
_UpperCamelCase : Union[str, Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0]
objects.extend(UpperCamelCase )
elif _re_quote_object.search(UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_UpperCamelCase : str = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCamelCase : Union[str, Any] = []
while (
line_index < len(UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_UpperCamelCase : List[str] = lines[line_index]
_UpperCamelCase : Any = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCamelCase : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase ):
        # If the line is an if not is_backend_available, we grab all associated objects.
_UpperCamelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCamelCase : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCamelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_UpperCamelCase : Optional[int] = lines[line_index]
_UpperCamelCase : Optional[Any] = _re_import.search(UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_UpperCamelCase : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
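# Compares the _import_structure entries against the TYPE_CHECKING imports and returns a list of mismatch errors.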
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[Any]:
def find_duplicates(UpperCamelCase ):
return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_UpperCamelCase : Tuple = []
for key in import_dict_objects.keys():
_UpperCamelCase : Dict = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_UpperCamelCase : Any = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_UpperCamelCase : Tuple = '''base imports''' if key == '''none''' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
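# Checks every __init__.py under the source tree and raises if any init's two halves are out of sync.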
def snake_case__ ( ) -> int:
_UpperCamelCase : Dict = []
for root, _, files in os.walk(UpperCamelCase ):
if "__init__.py" in files:
_UpperCamelCase : int = os.path.join(UpperCamelCase ,'''__init__.py''' )
_UpperCamelCase : str = parse_init(UpperCamelCase )
if objects is not None:
_UpperCamelCase : int = analyze_results(*UpperCamelCase )
if len(UpperCamelCase ) > 0:
_UpperCamelCase : List[str] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(UpperCamelCase ) )
if len(UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(UpperCamelCase ) )
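# Lists all submodules of transformers, skipping private modules and empty leftover folders.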
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = []
for path, directories, files in os.walk(UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_UpperCamelCase : List[str] = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) )
_UpperCamelCase : Union[str, Any] = short_path.replace(os.path.sep ,'''.''' )
submodules.append(UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
_UpperCamelCase : int = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) )
_UpperCamelCase : List[Any] = short_path.replace('''.py''' ,'''''' ).replace(os.path.sep ,'''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(UpperCamelCase )
return submodules
_UpperCAmelCase : Optional[Any] = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def snake_case__ ( ) -> Optional[int]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_UpperCamelCase : str = direct_transformers_import(UpperCamelCase )
_UpperCamelCase : str = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(UpperCamelCase ,'''__init__.py''' ) ,'''r''' ) as f:
_UpperCamelCase : int = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' ,UpperCamelCase ) ) )
_UpperCamelCase : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCamelCase ) > 0:
_UpperCamelCase : str = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
f'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 683 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
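        # Computes exact-match accuracy between the decoded predictions and labels.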
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 1 |
'''simple docstring'''
import math
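# Insertion sort over array[start:end]; introsort falls back to this for small slices.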
def snake_case__ ( UpperCamelCase ,UpperCamelCase = 0 ,UpperCamelCase = 0 ) -> list:
_UpperCamelCase : List[str] = end or len(UpperCamelCase )
for i in range(UpperCamelCase ,UpperCamelCase ):
_UpperCamelCase : int = i
_UpperCamelCase : List[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCamelCase : int = array[temp_index - 1]
temp_index -= 1
_UpperCamelCase : Optional[int] = temp_index_value
return array
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> None: # Max Heap
_UpperCamelCase : List[Any] = index
_UpperCamelCase : List[str] = 2 * index + 1 # Left Node
_UpperCamelCase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCamelCase : str = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCamelCase : int = right_index
if largest != index:
_UpperCamelCase, _UpperCamelCase : str = array[largest], array[index]
heapify(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
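# In-place heap sort: build a max heap, then repeatedly swap the root to the end and re-heapify.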
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Union[str, Any] = len(UpperCamelCase )
for i in range(n // 2 ,-1 ,-1 ):
heapify(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
for i in range(n - 1 ,0 ,-1 ):
_UpperCamelCase, _UpperCamelCase : int = array[0], array[i]
heapify(UpperCamelCase ,0 ,UpperCamelCase )
return array
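# Median-of-three pivot selection to avoid quicksort's worst case on sorted input.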
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
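# Hoare-style partition around the pivot; returns the index where the range splits.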
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = low
_UpperCamelCase : Union[str, Any] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCamelCase, _UpperCamelCase : List[str] = array[j], array[i]
i += 1
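# Entry point: introsort with a depth limit of 2 * ceil(log2(n)) and an insertion-sort threshold of 16.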
def snake_case__ ( UpperCamelCase ) -> list:
if len(UpperCamelCase ) == 0:
return array
_UpperCamelCase : Optional[Any] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) )
_UpperCamelCase : Union[str, Any] = 16
return intro_sort(UpperCamelCase ,0 ,len(UpperCamelCase ) ,UpperCamelCase ,UpperCamelCase )
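# Quicksort with median-of-three pivots; switches to heap sort once max_depth is exhausted and to insertion sort for small ranges.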
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(UpperCamelCase )
max_depth -= 1
_UpperCamelCase : List[str] = median_of_a(UpperCamelCase ,UpperCamelCase ,start + ((end - start) // 2) + 1 ,end - 1 )
_UpperCamelCase : int = partition(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
intro_sort(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : int = p
return insertion_sort(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : str = input("""Enter numbers separated by a comma : """).strip()
_UpperCAmelCase : Any = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 683 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
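# Builds the argument parser for the "accelerate env" command, optionally attached as a subparser.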
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
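# Gathers environment details (versions, platform, hardware, default config) and prints them for bug reports.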
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Optional[int] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = AlbertTokenizer
A__ : Tuple = AlbertTokenizerFast
A__ : Optional[Any] = True
A__ : int = True
A__ : List[Any] = True
def _lowercase ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : Optional[Any] = AlbertTokenizer(_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self , _snake_case ) -> int:
_UpperCamelCase : int = '''this is a test'''
_UpperCamelCase : Optional[Any] = '''this is a test'''
return input_text, output_text
def _lowercase ( self ) -> Optional[Any]:
_UpperCamelCase : List[str] = '''<pad>'''
_UpperCamelCase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def _lowercase ( self ) -> Union[str, Any]:
_UpperCamelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(_snake_case ) , 30000 )
def _lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
_UpperCamelCase : Dict = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_snake_case )
_UpperCamelCase : str = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_UpperCamelCase : List[str] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
_UpperCamelCase : str = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_UpperCamelCase : str = self.get_rust_tokenizer()
_UpperCamelCase : Tuple = tokenizer.encode(_snake_case )
_UpperCamelCase : Any = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Dict = AlbertTokenizer(_snake_case , keep_accents=_snake_case )
_UpperCamelCase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [48, 25, 21, 1289] )
_UpperCamelCase : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(_snake_case , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : int = AlbertTokenizer(_snake_case )
_UpperCamelCase : int = tokenizer.encode('''sequence builders''' )
_UpperCamelCase : List[Any] = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(_snake_case )
_UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase ( self ) -> str:
# fmt: off
_UpperCamelCase : str = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 683 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
_UpperCamelCase : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
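# Example renames: "stem.conv.weight" -> "bit.embedder.convolution.weight";
# "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight".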
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 683 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100000)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Slightly increased speed by checking five digits at a time.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
# Two chains are formed:
# one ends in 89, and seeding it with its member 58 first gives the fewest
# iterations when checking all of the remaining members;
# the other ends in 1 and contains only the single element 1.
# So 58 and 1 are the values declared at the start.
# A dictionary was changed to an array to speed up the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_00_00_00) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
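# Worked example: next_number(44) == 4**2 + 4**2 == 32, and the chain
# 44 -> 32 -> 13 -> 10 -> 1 ends in 1, so chain(44) is True.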
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
__magic_name__ :Tuple = s_dict.pop(snake_case )
elif "subsample" in key:
__magic_name__ :Optional[int] = s_dict.pop(snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Optional[int] = emb.weight.shape
__magic_name__ :Optional[Any] = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :List[str] = emb.weight.data
return lin_layer
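# The returned layer reuses the embedding weights (weight tying), so it maps
# decoder hidden states of size emb_size to vocab_size output logits.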
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :List[Any] = mam_aaa['''args''']
__magic_name__ :List[str] = mam_aaa['''model''']
__magic_name__ :Optional[Any] = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(snake_case )
rename_keys(snake_case )
__magic_name__ :List[Any] = state_dict['''decoder.embed_tokens.weight'''].shape[0]
__magic_name__ :Union[str, Any] = args.share_decoder_input_output_embed
__magic_name__ :Any = [int(snake_case ) for i in args.conv_kernel_sizes.split(''',''' )]
__magic_name__ :List[Any] = SpeechaTextConfig(
vocab_size=snake_case, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(snake_case ), conv_channels=args.conv_channels, conv_kernel_sizes=snake_case, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=snake_case, num_beams=5, max_length=2_0_0, use_cache=snake_case, decoder_start_token_id=2, early_stopping=snake_case, )
__magic_name__ :str = SpeechaTextForConditionalGeneration(snake_case )
__magic_name__ , __magic_name__ :List[Any] = model.model.load_state_dict(snake_case, strict=snake_case )
if len(snake_case ) > 0 and not set(snake_case ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
__magic_name__ :Tuple = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__magic_name__ :Optional[Any] = lm_head_weights
model.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 0 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: """Sunday""",
    1: """Monday""",
    2: """Tuesday""",
    3: """Wednesday""",
    4: """Thursday""",
    5: """Friday""",
    6: """Saturday""",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
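# Worked example: get_week_day(2023, 1, 1) computes century_anchor == 2
# (Tuesday), dooms_day == 2, January day_anchor == 3, and
# (2 + 1 - 3) % 7 == 0, giving "Sunday".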
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __lowerCamelCase (_a ):
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = None
class __lowerCamelCase (_a ):
def __init__( self: Optional[Any],A_: str=1,A_: int=0,A_: Tuple=2,A_: Optional[int]=512,A_: Tuple="cls",A_: Any=False,A_: int=True,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = project_dim
__UpperCamelCase = pooler_fn
__UpperCamelCase = learn_encoder
__UpperCamelCase = use_attention_mask
class __lowerCamelCase (_a ):
_lowercase = [R"""pooler""", R"""logit_scale"""]
_lowercase = [R"""position_ids""", R"""predictions.decoder.bias"""]
_lowercase = """roberta"""
_lowercase = RobertaSeriesConfig
def __init__( self: int,A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = nn.Linear(config.hidden_size,config.project_dim )
__UpperCamelCase = getattr(A_,'has_pre_transformation',A_ )
if self.has_pre_transformation:
__UpperCamelCase = nn.Linear(config.hidden_size,config.project_dim )
__UpperCamelCase = nn.LayerNorm(config.hidden_size,eps=config.layer_norm_eps )
self.post_init()
def snake_case_ ( self: List[Any],A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[torch.Tensor] = None,A_: Optional[bool] = None,A_: Optional[bool] = None,A_: Optional[bool] = None,):
'''simple docstring'''
__UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase = self.base_model(
input_ids=A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,encoder_hidden_states=A_,encoder_attention_mask=A_,output_attentions=A_,output_hidden_states=True if self.has_pre_transformation else output_hidden_states,return_dict=A_,)
if self.has_pre_transformation:
__UpperCamelCase = outputs['hidden_states'][-2]
__UpperCamelCase = self.pre_LN(A_ )
__UpperCamelCase = self.transformation_pre(A_ )
return TransformationModelOutput(
projection_state=A_,last_hidden_state=outputs.last_hidden_state,hidden_states=outputs.hidden_states,attentions=outputs.attentions,)
else:
__UpperCamelCase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=A_,last_hidden_state=outputs.last_hidden_state,hidden_states=outputs.hidden_states,attentions=outputs.attentions,)
| 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
| 683 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple:
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
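# Example: height == width == 768 with scale_factor == 8 gives
# 768 // 64 == 12 exactly, so the function returns (96, 96).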
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('''RGB'''))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
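# The returned tensor has shape (1, 3, h, w) with values in [-1.0, 1.0],
# ready to be encoded by the movq VAE inside the pipeline below.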
class lowerCamelCase__ ( _A):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : DDPMScheduler , __lowerCAmelCase : VQModel , ) -> Tuple:
super().__init__()
self.register_modules(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , movq=__lowerCAmelCase , )
_A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case_ ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Dict:
# get the original timestep using init_timestep
_A = min(int(num_inference_steps * strength ) , __lowerCAmelCase )
_A = max(num_inference_steps - init_timestep , 0 )
_A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
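    # Example (assuming the obfuscated arguments are num_inference_steps,
    # strength and device): with num_inference_steps=100 and strength=0.3,
    # init_timestep == 30 and t_start == 70, so only the last 30 scheduler
    # timesteps are actually run.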
def snake_case_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=None ) -> List[Any]:
if not isinstance(__lowerCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}''' )
_A = image.to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
_A = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A = image
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_A = torch.cat(__lowerCAmelCase , dim=0 )
else:
_A = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_A = self.movq.config.scaling_factor * init_latents
_A = torch.cat([init_latents] , dim=0 )
_A = init_latents.shape
_A = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
# get latents
_A = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_A = init_latents
return latents
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : str=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A = torch.device(f'''cuda:{gpu_id}''' )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : int , __lowerCAmelCase : str=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A = cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self : Union[str, Any] ) -> Any:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : Any , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 1_00 , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : float = 0.3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> List[Any]:
_A = self._execution_device
_A = guidance_scale > 1.0
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = torch.cat(__lowerCAmelCase , dim=0 )
_A = image_embeds.shape[0]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = torch.cat(__lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
_A = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
_A = negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
_A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = [image]
if not all(isinstance(__lowerCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
_A = torch.cat([prepare_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for i in image] , dim=0 )
_A = image.to(dtype=image_embeds.dtype , device=__lowerCAmelCase )
_A = self.movq.encode(__lowerCAmelCase )['''latents''']
_A = latents.repeat_interleave(__lowerCAmelCase , dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
_A , _A = self.get_timesteps(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_A = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A = downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor )
_A = self.prepare_latents(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = {'''image_embeds''': image_embeds}
_A = self.unet(
sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
if do_classifier_free_guidance:
_A , _A = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A = noise_pred.chunk(2 )
_A , _A = variance_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0]
# post-processing
_A = self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_A = image * 0.5 + 0.5
_A = image.clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 2 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
'''simple docstring'''
def prefix_function(input_string: str) -> list[int]:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use the previous results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
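# Example: prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3], so
# longest_prefix("aabaaab") == 3 (the proper prefix "aab" is also a suffix).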
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
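# Assuming the usual T5X parameter layout, k/q/v come out with shape
# (d_model, num_heads * d_kv) and o with (num_heads * d_kv, d_model);
# each is transposed later when copied into the PyTorch state dict.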
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : List[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_UpperCAmelCase : int = 100
_UpperCAmelCase : List[Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_UpperCAmelCase : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def snake_case__ ( UpperCamelCase ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase : set[int] = set()
_UpperCamelCase : int
_UpperCamelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def snake_case__ ( UpperCamelCase = 50_00 ) -> int | None:
for number_to_partition in range(1 ,UpperCamelCase ):
if len(partition(UpperCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer, i.e. a character splitter whose ids are raw codepoints."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string, i.e. split it into a list of characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (a single character) to an id (its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a codepoint (id) back to a string, handling the special pseudo-characters."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write out.
        return ()
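# Usage sketch for the character-level scheme above (ids are raw codepoints):
#     tokenizer = CanineTokenizer()
#     tokenizer._tokenize("hi")            # ['h', 'i']
#     tokenizer._convert_token_to_id("h")  # 104 == ord("h")
#     tokenizer.cls_token_id               # 0xE000, a private-use codepoint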
| 5 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (elia_train, eli5_train_q_index)
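# Minimal sketch of the max-inner-product lookup these indexes perform
# (synthetic vectors; the real app uses 128-d question/passage embeddings):
#     import numpy as np
#     index = faiss.IndexFlatIP(128)
#     index.add(np.random.rand(1000, 128).astype("float32"))
#     D, I = index.search(np.random.rand(1, 128).astype("float32"), 10)
#     # I[0] holds the row ids of the 10 highest dot-product matches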
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0",
        )[0]
    return (answer, support_list)  # relies on the module-level support_list set in the UI handler below
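# Roughly, qa_sas_generate wraps a standard seq2seq generate() call; a hedged sketch
# of the equivalent direct call (shapes and device are illustrative):
#     inputs = sas_tokenizer(question_doc, return_tensors="pt", truncation=True, max_length=1024).to("cuda:0")
#     output = sas_model.generate(**inputs, num_beams=2, min_length=64, max_length=256)
#     answer = sas_tokenizer.decode(output[0], skip_special_tokens=True)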
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---
**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 0 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
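# Worked examples: 5 -> 25 and 76 -> 5776 keep their own digits as the square's
# trailing digits, so is_automorphic_number(5) and is_automorphic_number(76) are
# True, while 7 -> 49 gives False.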
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary search tree node."""

    def __init__(self, value=None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """A binary search tree built on top of ``Node``."""

    def __init__(self, root=None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node, new_children) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
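# Note on the traversals above: an inorder walk visits a BST in sorted order, which
# is exactly why the k-th smallest lookup can simply index the inorder list. For the
# example tree built from (8, 3, 6, 1, 10, 14, 13, 4, 7), inorder yields
# [1, 3, 4, 6, 7, 8, 10, 13, 14], so k = 3 returns 4.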
| 683 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance relation |Z|**2 = R**2 + X**2 to any two given
    values and return a dict with the name and value of the missing (zero) one.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
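# Worked example: with resistance=3 and reactance=4 (a 3-4-5 triple),
# electrical_impedance(3, 4, 0) returns {"impedance": 5.0}, since
# |Z| = sqrt(R**2 + X**2) = sqrt(9 + 16) = 5.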
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Whisper model."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Whisper (speech encoder + text decoder)."""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
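# Usage sketch: the defaults above reproduce the base layout, and attribute_map lets
# generic code read mapped names:
#     cfg = WhisperConfig()
#     assert cfg.hidden_size == cfg.d_model                      # via attribute_map
#     assert cfg.num_attention_heads == cfg.encoder_attention_heads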
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating from this node revisits a node, i.e. the list has a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
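# The visited-list check above is O(n^2) time and O(n) space; a constant-space
# alternative is Floyd's tortoise-and-hare, sketched here for comparison:
#     def has_loop_floyd(head: Node | None) -> bool:
#         slow = fast = head
#         while fast is not None and fast.next_node is not None:
#             slow = slow.next_node
#             fast = fast.next_node.next_node
#             if slow is fast:
#                 return True
#         return False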
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 8 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
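# Example invocation sketch (script and checkpoint names are placeholders):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform
# The saved state dict seeds a 6-layer student from teacher layers [0, 2, 4, 7, 9, 11].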
| 683 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
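# Example invocation sketch (model identifiers are illustrative, not mandated here):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint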
| 9 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
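# Side note: order inside FlaxLogitsProcessorList matters -- temperature rescaling
# runs before top-k/top-p truncation here, and swapping that order changes which
# tokens survive. A minimal composition sketch (inputs hypothetical):
#     processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
#     scores = processors(input_ids, scores, cur_len=cur_len)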
| 683 | 0 |
def has_unique_chars(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned the bit on for this codepoint, the character repeats.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
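# Examples: has_unique_chars("abc") -> True, has_unique_chars("aba") -> False.
# The bitmap is an unbounded Python int indexed by codepoint, so this stays correct
# for any Unicode input, at the cost of big-int shifts for high codepoints.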
| 10 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
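# These tests hit the Hugging Face Hub, hence the module-level integration mark;
# a typical filtered run might look like (invocation is illustrative):
#   pytest tests/test_inspect.py -m integration -k "config_names"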
| 683 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=16 , A=36 , A=6 , A=6 , A=6 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> List[str]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = embedding_size
_a = hidden_size
_a = num_hidden_layers
_a = num_hidden_groups
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Tuple:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ (self , A , A , A , A , A , A , A ) -> int:
"""simple docstring"""
_a = AlbertModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A )
_a = model(A , token_type_ids=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A ) -> str:
"""simple docstring"""
_a = AlbertForPreTraining(config=A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , token_type_ids=A , labels=A , sentence_order_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ (self , A , A , A , A , A , A , A ) -> Optional[int]:
"""simple docstring"""
_a = AlbertForMaskedLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A ) -> Dict:
"""simple docstring"""
_a = AlbertForQuestionAnswering(config=A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ (self , A , A , A , A , A , A , A ) -> Optional[int]:
"""simple docstring"""
_a = self.num_labels
_a = AlbertForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ (self , A , A , A , A , A , A , A ) -> str:
"""simple docstring"""
_a = self.num_labels
_a = AlbertForTokenClassification(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = self.num_choices
_a = AlbertForMultipleChoice(config=A )
model.to(A )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ (self ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def a__ (self , A , A , A=False ) -> Optional[Any]:
"""simple docstring"""
_a = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
_a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
def a__ (self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> int:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def a__ (self ) -> Dict:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
@slow
def a__ (self ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = AlbertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AlbertModel.from_pretrained('''albert-base-v2''' )
_a = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(A , attention_mask=A )[0]
_a = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
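# A minimal inference sketch (assumes Hub access) mirroring the integration test
# above, but building the inputs with the tokenizer instead of hand-written ids:
#
#   from transformers import AlbertModel, AlbertTokenizerFast
#
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   model = AlbertModel.from_pretrained("albert-base-v2").eval()
#   inputs = tokenizer("Hello ALBERT", return_tensors="pt")
#   with torch.no_grad():
#       hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)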
| 11 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet ( self ):
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
    def dummy_unet_condition ( self ):
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet ( self ):
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
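# A minimal end-to-end sketch (assumes a CUDA device and Hub access; the
# checkpoint matches the integration test above):
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cuda")
#   out = pipe(generator=torch.Generator(device="cuda").manual_seed(0))
#   audio, image = out.audios[0], out.images[0]  # raw waveform + mel-spectrogram image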
| 683 | 0 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase__ : List[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowerCamelCase__ : Dict = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
lowerCamelCase__ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ : List[str] = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCamelCase__ : Optional[Any] = """allenai"""
def rewrite_dict_keys ( d ) -> Union[str, Any]:
    '''simple docstring'''
    # strip trailing "@@" continuation markers, append "</w>" to word-final tokens
    da = dict((re.sub(R"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = d[k]  # restore
    return da
def convert_fsmt_checkpoint_to_pytorch ( fsmt_checkpoint_path , pytorch_dump_folder_path ) -> Dict:
'''simple docstring'''
assert os.path.exists(lowercase_ )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase__ : int = basename(lowercase_ )
lowercase__ : List[Any] = dirname(lowercase_ )
lowercase__ : List[str] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase__ : int = cls.hub_models()
lowercase__ : Tuple = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowercase__ : Optional[Any] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
lowercase__ : Optional[int] = hub_utils.from_pretrained(
lowercase_ , lowercase_ , lowercase_ , archive_map=lowercase_ , **lowercase_ )
lowercase__ : Tuple = vars(chkpt["""args"""]["""model"""] )
lowercase__ : List[Any] = args["""source_lang"""]
lowercase__ : Dict = args["""target_lang"""]
lowercase__ : Optional[Any] = dirname(lowercase_ )
lowercase__ : int = basename(lowercase_ )
# dicts
lowercase__ : Union[str, Any] = os.path.join(lowercase_ , F'dict.{src_lang}.txt' )
lowercase__ : Optional[int] = os.path.join(lowercase_ , F'dict.{tgt_lang}.txt' )
lowercase__ : Optional[int] = Dictionary.load(lowercase_ )
lowercase__ : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
lowercase__ : str = len(lowercase_ )
lowercase__ : Any = os.path.join(lowercase_ , """vocab-src.json""" )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase__ : Any = True
for k in src_vocab.keys():
if not k.islower():
lowercase__ : List[str] = False
break
lowercase__ : Tuple = Dictionary.load(lowercase_ )
lowercase__ : Any = rewrite_dict_keys(tgt_dict.indices )
lowercase__ : Any = len(lowercase_ )
lowercase__ : Any = os.path.join(lowercase_ , """vocab-tgt.json""" )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# merges_file (bpecodes)
lowercase__ : Union[str, Any] = os.path.join(lowercase_ , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase__ : Optional[int] = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ):
break
with open(lowercase_ , encoding="""utf-8""" ) as fin:
lowercase__ : List[Any] = fin.read()
lowercase__ : Optional[Any] = re.sub(R""" \d+$""" , """""" , lowercase_ , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as fout:
fout.write(lowercase_ )
# model config
lowercase__ : Tuple = os.path.join(lowercase_ , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
lowercase__ : List[str] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowercase__ : Optional[int] = 5
lowercase__ : List[str] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase__ : Optional[int] = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowercase__ : Union[str, Any] = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# tokenizer config
lowercase__ : Optional[int] = os.path.join(lowercase_ , lowercase_ )
lowercase__ : int = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 10_24,
"""do_lower_case""": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# model
lowercase__ : Dict = chkpt["""models"""][0]
lowercase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowercase__ : Any = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase__ : List[Any] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(lowercase_ , lowercase_ )
lowercase__ : str = FSMTConfig.from_pretrained(lowercase_ )
lowercase__ : List[str] = FSMTForConditionalGeneration(lowercase_ )
# check that it loads ok
model_new.load_state_dict(lowercase_ , strict=lowercase_ )
# save
lowercase__ : str = os.path.join(lowercase_ , lowercase_ )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase_ , lowercase_ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
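# Post-conversion smoke test (sketch; the dump directory is a placeholder):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tok = FSMTTokenizer.from_pretrained("data/wmt19-en-de")
#   mdl = FSMTForConditionalGeneration.from_pretrained("data/wmt19-en-de")
#   ids = tok.encode("Machine learning is great", return_tensors="pt")
#   print(tok.decode(mdl.generate(ids, num_beams=5)[0], skip_special_tokens=True))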
| 12 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
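# Consumer-side behaviour (sketch): the torch/flax submodules registered above
# are only imported on first attribute access, so
#
#   from transformers.models.longt5 import LongT5Config  # cheap, config only
#   from transformers.models.longt5 import LongT5Model   # triggers the torch import
#
# the second line pays the heavy import cost (and raises if torch is unavailable).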
| 683 | 0 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big ( n : int , prec : int = 10_00 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # keep d integral for the modular exponentiation below
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 13 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
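# Usage sketch (assumes Hub access):
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tok("Hello world", return_tensors="pt")  # input_ids + attention_mask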
| 683 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
a__ = input('''Enter image url: ''').strip()
print(f'''Downloading image from {url} ...''')
a__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
a__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
a__ = requests.get(image_url).content
a__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
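# Optional hardening sketch: fail fast on HTTP errors and on pages without the tag.
#
#   resp = requests.get(url, timeout=30)
#   resp.raise_for_status()
#   tag = BeautifulSoup(resp.content, "html.parser").find("meta", {"property": "og:image"})
#   if tag is None:
#       raise SystemExit("No og:image meta tag found on the page.")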
| 14 |
'''simple docstring'''
def odd_even_sort ( input_list ) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 ,len(input_list ) - 1 ,2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 ,len(input_list ) - 1 ,2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
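# Example (sketch): odd_even_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5].
# Brick/odd-even sort needs at most len(input_list) alternating sweeps, so it is
# O(n^2) sequentially, but each sweep touches disjoint pairs and therefore
# parallelizes trivially.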
| 683 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
A : Optional[int] = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image : np.ndarray , output_size : Union[int, Iterable[int]] , keep_aspect_ratio : bool , multiple : int ) -> Tuple[int, int]:
    """simple docstring"""
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
class DPTImageProcessor ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
def __init__(self : Any , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
lowercase__ = size if size is not None else {"""height""": 384, """width""": 384}
lowercase__ = get_size_dict(_UpperCAmelCase )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = keep_aspect_ratio
lowercase__ = ensure_multiple_of
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 1 , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase__ = get_resize_output_image_size(
_UpperCAmelCase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCamelCase__ (self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : str , ) -> int:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCamelCase__ (self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(_UpperCAmelCase )
lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
lowercase__ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
lowercase__ = {"""pixel_values""": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Tuple] = None ) -> Dict:
"""simple docstring"""
lowercase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_UpperCAmelCase ):
lowercase__ = target_sizes.numpy()
lowercase__ = []
for idx in range(len(_UpperCAmelCase ) ):
lowercase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_UpperCAmelCase )
lowercase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_UpperCAmelCase )
else:
lowercase__ = logits.argmax(dim=1 )
lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
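# A minimal preprocessing sketch for the public counterpart of this class
# (transformers.DPTImageProcessor; assumes the vision extras are installed):
#
#   from transformers import DPTImageProcessor
#   from PIL import Image
#
#   processor = DPTImageProcessor(size={"height": 384, "width": 384},
#                                 keep_aspect_ratio=True, ensure_multiple_of=32)
#   pixel_values = processor(Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values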
| 15 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint ( checkpoint , config ) -> Union[str, Any]:
    vae_state_dict = checkpoint
    new_checkpoint = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def vae_pt_to_vae_diffuser ( checkpoint_path , output_path ,) -> None:
    # Only support V1
    r = requests.get(
        ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 5_12
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('''safetensors''' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path ,framework='''pt''' ,device='''cpu''' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path ,map_location=device )['''state_dict''']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config ,image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint ,vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output VAE model.""")
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
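# Post-conversion check (sketch; the dump path is a placeholder):
#
#   from diffusers import AutoencoderKL
#
#   vae = AutoencoderKL.from_pretrained("path/to/dump")
#   latents = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist.sample()
#   recon = vae.decode(latents).sample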
| 683 | 0 |
def is_automorphic_number ( number : int ) -> bool:
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
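# Example (sketch): 76 * 76 = 5776 ends in 76, so the check succeeds, while
# 7 * 7 = 49 does not end in 7, so it fails. Numbers passing this digit-by-digit
# comparison are the automorphic numbers: 0, 1, 5, 6, 25, 76, 376, 625, ...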
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 16 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor ( ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
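# Usage sketch (this matches transformers.AltCLIPProcessor; the checkpoint name
# refers to the public AltCLIP release and requires Hub access):
#
#   from transformers import AltCLIPProcessor
#   from PIL import Image
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)),
#                     return_tensors="pt")  # input_ids, attention_mask, pixel_values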
| 683 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main ( ) -> None:
    img_paths , annos = get_dataset(LABEL_DIR ,IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) ,4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths ,annos ,idxs ,OUTPUT_SIZE ,SCALE_RANGE ,filter_scale=FILTER_TINY_SCALE ,)
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(f'''{file_root}.jpg''' ,new_image ,[cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
        with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
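# Build one mosaic from four images: a random split point divides the output canvas
# into quadrants, each image is resized into its quadrant, and its normalized boxes
# are rescaled and shifted into mosaic coordinates.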
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
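# Random lowercase/digit string used to keep output file names unique.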
def snake_case__ ( UpperCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def __a(SCREAMING_SNAKE_CASE_ : Namespace ):
'''simple docstring'''
return TrainCommand(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( __magic_name__ ):
@staticmethod
def _snake_case ( _lowerCAmelCase ) -> Any:
_lowerCAmelCase = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=_lowerCAmelCase , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=_lowerCAmelCase , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=_lowerCAmelCase , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=_lowerCAmelCase , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=_lowerCAmelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=_lowerCAmelCase , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=_lowerCAmelCase , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=_lowerCAmelCase , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=_lowerCAmelCase , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=_lowerCAmelCase , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=_lowerCAmelCase , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=_lowerCAmelCase , default=1E-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=_lowerCAmelCase )
def __init__( self , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = logging.get_logger("transformers-cli/training" )
_lowerCAmelCase = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=_lowerCAmelCase )
_lowerCAmelCase = args.output
_lowerCAmelCase = args.column_label
_lowerCAmelCase = args.column_text
_lowerCAmelCase = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase = args.validation_split
_lowerCAmelCase = args.train_batch_size
_lowerCAmelCase = args.valid_batch_size
_lowerCAmelCase = args.learning_rate
_lowerCAmelCase = args.adam_epsilon
def _snake_case ( self ) -> Optional[Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _snake_case ( self ) -> Optional[int]:
raise NotImplementedError
def _snake_case ( self ) -> List[str]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 18 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 2_24, '''width''': 2_24},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
'''do_convert_rgb''': True,
}
_UpperCamelCase = os.path.join(self.tmpdirname , __a)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(__a , __a)
def UpperCAmelCase ( self , **__a) -> List[Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self , **__a) -> Any:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self , **__a) -> List[str]:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
_UpperCamelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_UpperCamelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a)
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_UpperCamelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , __a)
self.assertIsInstance(processor_fast.tokenizer , __a)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , __a)
self.assertIsInstance(processor_fast.image_processor , __a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_UpperCamelCase = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
_UpperCamelCase = self.get_image_processor(do_normalize=__a)
_UpperCamelCase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=__a)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __a)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(__a , return_tensors='''np''')
_UpperCamelCase = processor(images=__a , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase = processor(text=__a)
_UpperCamelCase = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=__a , images=__a)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(__a)
_UpperCamelCase = tokenizer.batch_decode(__a)
self.assertListEqual(__a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a)
_UpperCamelCase = '''Alexandra,T-shirt的价格是15便士。'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=__a , images=__a)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 19 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_lowerCAmelCase: Union[str, Any] = logging.get_logger('transformers.models.encodec')
_lowerCAmelCase: List[Any] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
_lowerCAmelCase: str = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
_lowerCAmelCase: Optional[int] = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
_lowerCAmelCase: Dict = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
_lowerCAmelCase: Tuple = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
_lowerCAmelCase: str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_lowerCAmelCase: Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_lowerCAmelCase: Tuple = []
_lowerCAmelCase: str = []
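# Copy `value` into the attribute reached by walking the dotted `key` inside the HF
# model, after checking that the checkpoint tensor and the target share a shape.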
def _lowercase( __a : Any , __a : List[str] , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
if weight_type is not None:
a__ =getattr(__a , __a ).shape
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "running_mean":
a__ =value
elif weight_type == "running_var":
a__ =value
elif weight_type == "num_batches_tracked":
a__ =value
elif weight_type == "weight_ih_l0":
a__ =value
elif weight_type == "weight_hh_l0":
a__ =value
elif weight_type == "bias_ih_l0":
a__ =value
elif weight_type == "bias_hh_l0":
a__ =value
elif weight_type == "weight_ih_l1":
a__ =value
elif weight_type == "weight_hh_l1":
a__ =value
elif weight_type == "bias_ih_l1":
a__ =value
elif weight_type == "bias_hh_l1":
a__ =value
else:
a__ =value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : Union[str, Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__ =key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
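# Translate every key of the original state dict through the MAPPING tables and copy
# the tensors into the HF model; keys that match nothing are collected and reported.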
def _lowercase( __a : str , __a : int , __a : Tuple ):
a__ =[]
if model_name == "encodec_24khz" or "encodec_32khz":
a__ =MAPPING_24K
elif model_name == "encodec_48khz":
a__ =MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__a , __a ):
logger.info(f"""{name} was ignored""" )
continue
a__ =False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__ =key.split('.*.' )
if prefix in name and suffix in name:
a__ =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "weight_ih_l0" in name:
a__ ='weight_ih_l0'
elif "weight_hh_l0" in name:
a__ ='weight_hh_l0'
elif "bias_ih_l0" in name:
a__ ='bias_ih_l0'
elif "bias_hh_l0" in name:
a__ ='bias_hh_l0'
elif "weight_ih_l1" in name:
a__ ='weight_ih_l1'
elif "weight_hh_l1" in name:
a__ ='weight_hh_l1'
elif "bias_ih_l1" in name:
a__ ='bias_ih_l1'
elif "bias_hh_l1" in name:
a__ ='bias_hh_l1'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
a__ ='weight'
elif "running_mean" in name:
a__ ='running_mean'
elif "running_var" in name:
a__ ='running_var'
elif "num_batches_tracked" in name:
a__ ='num_batches_tracked'
else:
a__ =None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def _lowercase( __a : str , __a : int , __a : str , __a : Tuple=None , __a : Optional[int]=None , ):
if config_path is not None:
a__ =EncodecConfig.from_pretrained(__a )
else:
a__ =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__ =[8, 5, 4, 4]
a__ =[2.2]
a__ =64
a__ =3_2000
a__ =2048
a__ =False
a__ =False
a__ =False
elif model_name == "encodec_48khz":
a__ =[8, 5, 4, 2]
a__ =[3.0, 6.0, 12.0, 24.0]
a__ =4_8000
a__ =2
a__ =False
a__ ='time_group_norm'
a__ =True
a__ =1.0
a__ =0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
a__ =EncodecModel(__a )
a__ =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__a )
a__ =torch.load(__a )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__ =original_checkpoint['best_state']
recursively_load_weights(__a , __a , __a )
model.save_pretrained(__a )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(__a )
model.push_to_hub(__a )
if __name__ == "__main__":
_lowerCAmelCase: str = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase: str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 20 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
_UpperCamelCase : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
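# Translate a timm state-dict key into the corresponding HF BiT module name.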
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
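# Load the timm checkpoint, rename its weights into the HF layout, rebuild the timm
# preprocessing as a BitImageProcessor, and check both models produce the same logits.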
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 0 |
import argparse
import copy
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple ={}
with open(lowerCamelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__magic_name__ : Optional[Any] =[]
_list.append([line.split()[1], line.split()[2]] )
__magic_name__ : Dict =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__magic_name__ : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
__magic_name__ : Tuple =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
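# Build an initial tour greedily: starting from the first node in the file, always
# move to the cheapest unvisited neighbour, then close the tour back at the start.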
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with open(lowerCamelCase ) as f:
__magic_name__ : Optional[int] =f.read(1 )
__magic_name__ : Any =start_node
__magic_name__ : Union[str, Any] =[]
__magic_name__ : Dict =start_node
__magic_name__ : List[Any] =0
while visiting not in first_solution:
__magic_name__ : Dict =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCamelCase ) and k[0] not in first_solution:
__magic_name__ : Tuple =k[1]
__magic_name__ : str =k[0]
first_solution.append(lowerCamelCase )
__magic_name__ : Optional[Any] =distance_of_first_solution + int(lowerCamelCase )
__magic_name__ : Tuple =best_node
first_solution.append(lowerCamelCase )
__magic_name__ : Optional[Any] =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__magic_name__ : Union[str, Any] =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
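# Enumerate every tour obtained by swapping two interior nodes of `solution`, append
# each tour's total distance as its last element, and sort by that distance.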
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =[]
for n in solution[1:-1]:
__magic_name__ : Union[str, Any] =solution.index(lowerCamelCase )
for kn in solution[1:-1]:
__magic_name__ : Union[str, Any] =solution.index(lowerCamelCase )
if n == kn:
continue
__magic_name__ : str =copy.deepcopy(lowerCamelCase )
__magic_name__ : List[str] =kn
__magic_name__ : List[str] =n
__magic_name__ : List[Any] =0
for k in _tmp[:-1]:
__magic_name__ : Optional[int] =_tmp[_tmp.index(lowerCamelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__magic_name__ : int =distance + int(i[1] )
_tmp.append(lowerCamelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__magic_name__ : List[str] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowerCamelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
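# Tabu search proper: each iteration moves to the best neighbour whose defining swap
# is not tabu, records that swap in a fixed-size tabu list, and keeps the best tour
# seen overall.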
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =1
__magic_name__ : List[str] =first_solution
__magic_name__ : int =[]
__magic_name__ : Dict =distance_of_first_solution
__magic_name__ : Union[str, Any] =solution
while count <= iters:
__magic_name__ : Dict =find_neighborhood(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =0
__magic_name__ : Any =neighborhood[index_of_best_solution]
__magic_name__ : Optional[Any] =len(lowerCamelCase ) - 1
__magic_name__ : List[str] =False
while not found:
__magic_name__ : Any =0
while i < len(lowerCamelCase ):
if best_solution[i] != solution[i]:
__magic_name__ : Any =best_solution[i]
__magic_name__ : Optional[Any] =solution[i]
break
__magic_name__ : int =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__magic_name__ : Optional[int] =True
__magic_name__ : List[str] =best_solution[:-1]
__magic_name__ : Optional[Any] =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__magic_name__ : List[Any] =cost
__magic_name__ : List[Any] =solution
else:
__magic_name__ : Optional[Any] =index_of_best_solution + 1
__magic_name__ : List[str] =neighborhood[index_of_best_solution]
if len(lowerCamelCase ) >= size:
tabu_list.pop(0 )
__magic_name__ : Optional[int] =count + 1
return best_solution_ever, best_cost
def lowerCAmelCase_ ( lowerCamelCase=None ):
__magic_name__ : int =generate_neighbours(args.File )
__magic_name__ , __magic_name__ : str =generate_first_solution(
args.File , lowerCamelCase )
__magic_name__ , __magic_name__ : List[Any] =tabu_search(
lowerCamelCase , lowerCamelCase , lowerCamelCase , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 21 |
'''simple docstring'''
_UpperCAmelCase : Any = [sum(int(c) ** 2 for c in str(i)) for i in range(100000)]
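# e.g. for 44: 4**2 + 4**2 = 32, and 32 -> 13 -> 10 -> 1, so 44's chain ends at 1.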
def snake_case__ ( UpperCamelCase ) -> int:
_UpperCamelCase : Any = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Every chain eventually falls into one of two cycles: the cycle containing 89
# (58 is a member of it) and the fixed point 1. Seeding the cache with 58 and 1
# lets every other number be classified with the fewest iterations.
# A flat array is used instead of a dictionary to speed up lookups.
_UpperCAmelCase : list[bool | None] = [None] * 10000000
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = False
def snake_case__ ( UpperCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) )
_UpperCamelCase : Tuple = number_chain
while number < 10_00_00_00:
_UpperCamelCase : int = number_chain
number *= 10
return number_chain
def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int:
for i in range(1 ,UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(UpperCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(UpperCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(UpperCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , )
elif len(UpperCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , )
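    # Plain Python generators yielding (feature-dict, label-id) pairs; each is wrapped
    # below in tf.data.Dataset.from_generator for its split.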
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case : str = logging.getLogger(__name__)
@dataclass
class A :
lowercase_ = field(metadata={'help': 'Which column contains the label'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the training file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} )
lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} )
lowercase_ = field(
default=128 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase_ = field(
default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase_ = field(
default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def snake_case_ ():
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(UpperCamelCase )
return results
if __name__ == "__main__":
main()
| 22 |
'''simple docstring'''
_UpperCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> str:
assert len(str(UpperCamelCase ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
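    # century_anchor: doomsday of the century, (5 * (century mod 4) + 2) mod 7.
    # dooms_day: the year's doomsday, offset from the anchor by
    # y // 12 + y % 12 + (y % 12) // 4, where y = year mod 100.
    # The requested date's weekday is its offset from the month's doomsday.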
_UpperCamelCase : Any = year // 1_00
_UpperCamelCase : List[Any] = (5 * (century % 4) + 2) % 7
_UpperCamelCase : Tuple = year % 1_00
_UpperCamelCase : Optional[int] = centurian % 12
_UpperCamelCase : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCamelCase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCamelCase : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _UpperCAmelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> List[str]:
UpperCamelCase_ = {}
UpperCamelCase_ = {}
if prompt is not None:
UpperCamelCase_ = prompt
if generate_kwargs is not None:
UpperCamelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCamelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
UpperCamelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _UpperCAmelCase , **_UpperCAmelCase ) -> int:
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> str:
UpperCamelCase_ = load_image(_UpperCAmelCase )
if prompt is not None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
f"""Received an invalid text input, got - {type(_UpperCAmelCase )} - but expected a single string. """
'Note also that one single text can be provided for conditional image to text generation.' )
UpperCamelCase_ = self.model.config.model_type
if model_type == "git":
UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
UpperCamelCase_ = self.tokenizer(text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids
UpperCamelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , header_text=_UpperCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
UpperCamelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
model_inputs.update(_UpperCAmelCase )
else:
raise ValueError(f"""Model type {model_type} does not support conditional text generation""" )
else:
UpperCamelCase_ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCamelCase_ = None
return model_inputs
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Any:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
# pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _UpperCAmelCase )
and all(x is None for x in model_inputs['input_ids'] )
):
UpperCamelCase_ = None
if generate_kwargs is None:
UpperCamelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCamelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCamelCase_ = self.model.generate(_UpperCAmelCase , **_UpperCAmelCase , **_UpperCAmelCase )
return model_outputs
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Any:
UpperCamelCase_ = []
for output_ids in model_outputs:
UpperCamelCase_ = {
'generated_text': self.tokenizer.decode(
_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , )
}
records.append(_UpperCAmelCase )
return records
| 23 |
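The class above backs the "image-to-text" task in the transformers pipeline API. A minimal usage sketch; the checkpoint name is an assumption chosen for illustration, not something the snippet fixes, and any Hub captioning model works the same way.

from transformers import pipeline

# Hypothetical checkpoint, for illustration only.
captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
result = captioner(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    max_new_tokens=20,  # forwarded to `generate` by the parameter handling above
)
print(result)  # e.g. [{"generated_text": "two cats lying on a couch"}]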
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
| 683 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True})
__lowercase : ClassVar[Features] = Features({'''question''': Value('''string'''), '''context''': Value('''string''')})
__lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string'''),
'''answer_start''': Value('''int32'''),
})
})
__lowercase : str = "question"
__lowercase : str = "context"
__lowercase : str = "answers"
@property
def lowerCAmelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 24 |
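The template above declares its schema with datasets `Features` objects. A small standalone sketch of the same extractive-QA schema, assuming only the public datasets API:

from datasets import Features, Sequence, Value

# The input/label schema the task template above pins down.
input_schema = Features({"question": Value("string"), "context": Value("string")})
label_schema = Features(
    {"answers": Sequence({"text": Value("string"), "answer_start": Value("int32")})}
)
print(input_schema)
print(label_schema)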
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # silence TensorFlow's C++ logging before the import below
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
for attribute in key.split("."):
SCREAMING_SNAKE_CASE : List[Any] = getattr(_a , _a)
if weight_type is not None:
SCREAMING_SNAKE_CASE : str = getattr(_a , _a).shape
else:
SCREAMING_SNAKE_CASE : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : int = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : str = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
# special case since naming is very similar
continue
SCREAMING_SNAKE_CASE : Optional[int] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(_a)[0].split(".")[-2]
SCREAMING_SNAKE_CASE : Dict = mapped_key.replace("*" , _a)
if "weight_g" in name:
SCREAMING_SNAKE_CASE : Any = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : List[str] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE : Any = "weight"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
set_recursively(_a , _a , _a , _a , _a)
continue
if not is_used:
unused_weights.append(_a)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split("conv_layers.")[-1]
SCREAMING_SNAKE_CASE : Optional[Any] = name.split(".")
SCREAMING_SNAKE_CASE : Any = int(items[0])
SCREAMING_SNAKE_CASE : Any = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(_a)
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a=None , _a=None , _a=True):
if config_path is not None:
SCREAMING_SNAKE_CASE : Tuple = UniSpeechSatConfig.from_pretrained(_a)
else:
SCREAMING_SNAKE_CASE : List[Any] = UniSpeechSatConfig()
SCREAMING_SNAKE_CASE : List[str] = ""
if is_finetuned:
SCREAMING_SNAKE_CASE : Dict = UniSpeechSatForCTC(_a)
else:
SCREAMING_SNAKE_CASE : str = UniSpeechSatForPreTraining(_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
SCREAMING_SNAKE_CASE : Union[str, Any] = model[0].eval()
recursively_load_weights(_a , _a)
hf_wavavec.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 |
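The weight-loading helper above reaches each parameter by walking a dotted key with repeated getattr before assigning. A self-contained toy sketch of that traversal pattern (the classes here are invented for illustration):

class _Leaf:
    def __init__(self) -> None:
        self.weight = 0.0

class _Block:
    def __init__(self) -> None:
        self.proj = _Leaf()

root = _Block()
pointer = root
for attribute in "proj".split("."):  # the dotted path, minus the final attribute
    pointer = getattr(pointer, attribute)
setattr(pointer, "weight", 1.0)  # assign at the end of the path, as the converter does
print(root.proj.weight)  # 1.0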
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
# Load weights from the T5X checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 683 | 0 |
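The attention lookup above folds per-head kernels into 2-D matrices before transposing them into PyTorch layout. A tiny numpy sketch of the same fold, with illustrative sizes (d_model=4, heads=2, head_dim=3):

import numpy as np

d_model, heads, head_dim = 4, 2, 3
k_tmp = np.arange(d_model * heads * head_dim, dtype=np.float32).reshape(d_model, heads, head_dim)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
print(k.shape)  # (4, 6): (d_model, heads * head_dim), as for the k/q/v kernels
o_tmp = np.arange(heads * head_dim * d_model, dtype=np.float32).reshape(heads, head_dim, d_model)
o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
print(o.shape)  # (6, 4): the output projection folds the two leading axes instead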
'''simple docstring'''
from __future__ import annotations
class _A :
def __init__( self : int , __magic_name__ : str , __magic_name__ : str ) -> Any:
"""simple docstring"""
__snake_case , __snake_case : str = text, pattern
__snake_case , __snake_case : List[Any] = len(__magic_name__ ), len(__magic_name__ )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : str ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if __magic_name__ == self.pattern[i]:
return i
return -1
def lowercase__ ( self : List[Any] , __magic_name__ : int ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowercase__ ( self : Union[str, Any] ) -> list[int]:
"""simple docstring"""
__snake_case : Tuple = []
for i in range(self.textLen - self.patLen + 1 ):
__snake_case : List[Any] = self.mismatch_in_text(__magic_name__ )
if mismatch_index == -1:
positions.append(__magic_name__ )
else:
__snake_case : List[Any] = self.match_in_pattern(self.text[mismatch_index] )
__snake_case : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__UpperCamelCase = "ABAABA"
__UpperCamelCase = "AB"
__UpperCamelCase = BoyerMooreSearch(text, pattern)
__UpperCamelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 26 |
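The lookup above implements the bad-character rule: on a mismatch, slide the pattern so its rightmost copy of the offending text character lines up, or jump past the window if the character never occurs. A standalone sketch of the resulting shift (the helper name is invented; the class above only collects match positions and never applies the shift):

def bad_character_shift(pattern: str, mismatched_char: str, mismatch_pos: int) -> int:
    # Rightmost occurrence of the mismatched text character inside the pattern.
    for i in range(len(pattern) - 1, -1, -1):
        if pattern[i] == mismatched_char:
            return max(1, mismatch_pos - i)
    return mismatch_pos + 1  # character absent: jump past it entirely

print(bad_character_shift("AB", "A", 1))  # 1: realign the pattern's 'A' under the text's 'A'
print(bad_character_shift("AB", "C", 1))  # 2: 'C' never occurs, skip the whole window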
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_UpperCAmelCase : int = 100
_UpperCAmelCase : List[Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_UpperCAmelCase : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def snake_case__ ( UpperCamelCase ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase : set[int] = set()
_UpperCamelCase : int
_UpperCamelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def snake_case__ ( UpperCamelCase = 50_00 ) -> int | None:
for number_to_partition in range(1 ,UpperCamelCase ):
if len(partition(UpperCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 27 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 0 |
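The dense retrieval in the app above is exact max-inner-product search over fixed-size embeddings. A minimal faiss sketch with random 128-dimensional vectors, assuming faiss is installed; the sizes are illustrative:

import faiss
import numpy as np

d, n = 128, 1000
rng = np.random.default_rng(0)
passages = rng.standard_normal((n, d)).astype("float32")

index = faiss.IndexFlatIP(d)  # exact inner-product index, as used above
index.add(passages)
query = rng.standard_normal((1, d)).astype("float32")
scores, ids = index.search(query, 10)  # top-10 passage ids by inner product
print(ids[0])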
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
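The `_LazyModule` indirection above defers heavy imports until a name is first accessed. A stripped-down sketch of the same idea using the module-level `__getattr__` hook from PEP 562; this mirrors the `_import_structure` mapping but is an assumption, not transformers' actual implementation:

import importlib

_import_structure = {"math_utils": ["fast_sqrt"]}  # submodule -> exported names (toy mapping)

def __getattr__(name):
    # Resolve lazily on first access, then cache the result at module level.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            value = getattr(importlib.import_module(f".{module_name}", __name__), name)
            globals()[name] = value
            return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")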
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
_UpperCamelCase : Node | None = None # Added in order to delete a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
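The k-th smallest helper above works because an inorder walk of a binary search tree yields its values in ascending order. A standalone sketch of that property on the same test values, using plain nested tuples instead of the Node class:

def inorder(node, out):
    # node is (value, left, right) or None; inorder visits a BST in ascending order
    if node is not None:
        value, left, right = node
        inorder(left, out)
        out.append(value)
        inorder(right, out)

# The BST built by inserting (8, 3, 6, 1, 10, 14, 13, 4, 7) in order.
tree = (8, (3, (1, None, None), (6, (4, None, None), (7, None, None))),
           (10, None, (14, (13, None, None), None)))
values = []
inorder(tree, values)
print(values)         # [1, 3, 4, 6, 7, 8, 10, 13, 14]
print(values[3 - 1])  # k = 3 -> 4, matching the kth-smallest logic above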
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = CanineTokenizer
a__: Tuple = False
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self ):
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
lowerCamelCase_ = 1024
return tokenizer
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowerCamelCase_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertIn('''token_type_ids''' , UpperCAmelCase )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = [
'''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
lowerCamelCase_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
lowerCamelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ = chr(0Xe_007 )
additional_special_tokens.append(UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ , lowerCamelCase_ = self.get_clean_sequence(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_005
lowerCamelCase_ = chr(UpperCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
lowerCamelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , input_encoded + special_token_id )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = chr(0Xe_005 )
lowerCamelCase_ = chr(0Xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(token_a[0] , UpperCAmelCase )
self.assertEqual(token_a[0] , UpperCAmelCase )
@require_tokenizers
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
lowerCamelCase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(UpperCAmelCase )
tokenizer.from_pretrained(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
lowerCamelCase_ = [new_token_a]
lowerCamelCase_ = [new_token_a]
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ = tokenizer_class.from_pretrained(UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ = 0Xe_007
lowerCamelCase_ = chr(UpperCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ = [AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )]
lowerCamelCase_ = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''hello world'''
if self.space_between_special_tokens:
lowerCamelCase_ = '''[CLS] hello world [SEP]'''
else:
lowerCamelCase_ = input
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(UpperCAmelCase , [output, output.lower()] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCamelCase_ = '''a'''
lowerCamelCase_ = ord(UpperCAmelCase )
for attr in attributes_list:
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [] )
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
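# A sketch of the character-level encoding these tests exercise: CANINE token
# ids are Unicode code points, framed by [CLS] = 0xE000 (57344) and
# [SEP] = 0xE001 (57345), matching the expected ids above (helper name is mine).
def canine_encode_sketch(text: str, cls_id: int = 0xE000, sep_id: int = 0xE001) -> list:
    return [cls_id] + [ord(ch) for ch in text] + [sep_id]
# canine_encode_sketch("Life") == [57344, 76, 105, 102, 101, 57345]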
| 29 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'whisper'
A__ : Tuple = ['past_key_values']
A__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _snake_case=51865 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=1536 , _snake_case=1536 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=50257 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=256 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=1500 , _snake_case=448 , _snake_case=50256 , _snake_case=50256 , _snake_case=50256 , _snake_case=None , _snake_case=[220, 50256] , _snake_case=False , _snake_case=256 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ) -> Any:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = num_mel_bins
_UpperCamelCase : List[str] = d_model
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Optional[int] = encoder_attention_heads
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[int] = encoder_ffn_dim
_UpperCamelCase : Any = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : List[Any] = init_std
_UpperCamelCase : Optional[int] = encoder_layerdrop
_UpperCamelCase : str = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : List[str] = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : int = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : int = mask_time_length
_UpperCamelCase : List[Any] = mask_time_min_masks
_UpperCamelCase : List[str] = mask_feature_prob
_UpperCamelCase : Optional[int] = mask_feature_length
_UpperCamelCase : Union[str, Any] = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Dict = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase : Tuple = {0: '''batch'''}
else:
_UpperCamelCase : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
return common_inputs
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 22050 , _snake_case = 5.0 , _snake_case = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = OrderedDict()
_UpperCamelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
_UpperCamelCase : int = encoder_inputs['''input_features'''].shape[2]
_UpperCamelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : str = super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = encoder_inputs.pop('''input_features''' )
_UpperCamelCase : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : List[str] = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowercase ( self ) -> float:
return 1E-3
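# A minimal sketch (mine, not part of this config file) of how suppress-token
# lists like the two above are applied at decode time: every listed vocabulary
# id is masked to -inf so it can never be sampled.
def suppress_tokens_sketch(logits, suppress_ids):
    for idx in suppress_ids:
        logits[..., idx] = float("-inf")
    return logits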
| 683 | 0 |
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
for i in range(1 , 1001 ):
total += i**i
return str(_lowercase )[-10:]
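# An equivalent modular sketch (my variant, not in the file above): keep only
# the last ten digits at every step instead of building the full big integer.
def solution_mod() -> str:
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)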
if __name__ == "__main__":
print(solution()) | 30 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
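# A sketch of the layer-selection idea above, isolated (indices copied from
# the loop): teacher layers [0, 2, 4, 7, 9, 11] map onto student layers 0..5.
teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
# teacher_to_student == {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}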
| 683 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
lowerCamelCase__ : int = '1'
lowerCamelCase__ : Optional[int] = '0'
lowerCamelCase__ : Optional[Any] = '1'
lowerCamelCase__ : int = ort.SessionOptions()
lowerCamelCase__ : List[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
lowerCamelCase__ : List[str] = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
lowerCamelCase__ : List[str] = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
lowerCamelCase__ : Union[str, Any] = ort.RunOptions()
lowerCamelCase__ : int = 128
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Tuple = np.ones((batch, sequence), dtype=np.intaa)
lowerCamelCase__ : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
lowerCamelCase__ : Any = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
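# Optional sketch (mine, not part of this script): latency loops are usually
# timed with time.perf_counter, which has better resolution than time.time.
def avg_latency_ms_sketch(fn, iters: int) -> float:
    start = time.perf_counter()
    for _ in range(iters):
        fn()
    return (time.perf_counter() - start) * 1_000 / iters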
print('Start inference...')
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : int = 2_000
lowerCamelCase__ : Any = {}
for iter in range(max_iters):
lowerCamelCase__ : str = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters)) | 31 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
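# A minimal sketch of the top-k warping these tests exercise (helper name is
# mine): keep the k largest logits per row and push everything else to -inf.
import jax
import jax.numpy as jnp
def top_k_warp_sketch(scores, k, filter_value=-float("inf")):
    topk_vals, _ = jax.lax.top_k(scores, k)  # (batch, k), sorted descending
    cutoff = topk_vals[:, -1, None]  # smallest logit that survives per row
    return jnp.where(scores < cutoff, filter_value, scores)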
| 683 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( A__ ):
__A : List[str] = (EulerDiscreteScheduler,)
__A : Optional[int] = 10
def UpperCamelCase( self , **_UpperCamelCase ):
_UpperCAmelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_UpperCamelCase )
return config
def UpperCamelCase( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def UpperCamelCase( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def UpperCamelCase( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def UpperCamelCase( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase = sample.to(_UpperCamelCase )
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase = sample.to(_UpperCamelCase )
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3 | 32 |
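# A minimal sketch of the sampling-loop pattern the scheduler tests above
# repeat: scale the sample, predict the residual, then step the scheduler.
# `model` is any callable (sample, t) -> residual; the names are illustrative.
def sampling_loop_sketch(scheduler, model, sample, generator):
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        residual = model(scaled, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample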
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
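# Hypothetical usage sketch of the helpers inspected above; the network access
# and the "paws" dataset mirror the parametrized cases in these tests.
from datasets import get_dataset_config_names, get_dataset_split_names
configs = get_dataset_config_names("paws")  # ["labeled_final", "labeled_swap", "unlabeled_final"]
splits = get_dataset_split_names("paws", config_name="labeled_final")  # ["train", "test", "validation"]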
| 683 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
snake_case__ = str(__lowerCAmelCase )
return n == n[::-1]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100_0000 ) -> Union[str, Any]:
snake_case__ = 0
for i in range(1 , __lowerCAmelCase ):
if is_palindrome(__lowerCAmelCase ) and is_palindrome(bin(__lowerCAmelCase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
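# Optimization sketch (mine, not in the file above): a positive even number
# ends in a binary 0, so it can never be a binary palindrome; checking only
# odd candidates halves the work while returning the same total.
def solution_odd_only(limit: int = 1_000_000) -> int:
    return sum(
        i
        for i in range(1, limit, 2)
        if str(i) == str(i)[::-1] and bin(i)[2:] == bin(i)[2:][::-1]
    )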
| 33 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
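# Determinism sketch (mine): the tests above re-seed a device-local
# torch.Generator before every pipeline call, so repeated runs draw
# byte-identical noise and produce identical outputs.
gen_a = torch.Generator(device="cpu").manual_seed(42)
gen_b = torch.Generator(device="cpu").manual_seed(42)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))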
| 683 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = data
# Initialize hash values
UpperCamelCase = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
UpperCamelCase = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
UpperCamelCase = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> bytes:
UpperCamelCase = B'''\x80''' + (B'''\x00''' * (6_3 - (len(lowerCamelCase_) + 8) % 6_4))
UpperCamelCase = struct.pack('''>Q''' , (len(lowerCamelCase_) * 8))
return data + padding + big_endian_integer
def UpperCAmelCase__ ( self) -> None:
# Convert into blocks of 64 bytes
UpperCamelCase = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data) , 6_4)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase = list(struct.unpack('''>16L''' , lowerCamelCase_))
# add 48 0-ed integers
words += [0] * 4_8
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.hashes
for index in range(0 , 6_4):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
UpperCamelCase = (
self.ror(words[index - 1_5] , 7)
^ self.ror(words[index - 1_5] , 1_8)
^ (words[index - 1_5] >> 3)
)
UpperCamelCase = (
self.ror(words[index - 2] , 1_7)
^ self.ror(words[index - 2] , 1_9)
^ (words[index - 2] >> 1_0)
)
UpperCamelCase = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0X1_0000_0000
# Compression
UpperCamelCase = self.ror(lowerCamelCase_ , 6) ^ self.ror(lowerCamelCase_ , 1_1) ^ self.ror(lowerCamelCase_ , 2_5)
UpperCamelCase = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
UpperCamelCase = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0000_0000
UpperCamelCase = self.ror(lowerCamelCase_ , 2) ^ self.ror(lowerCamelCase_ , 1_3) ^ self.ror(lowerCamelCase_ , 2_2)
UpperCamelCase = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase = (sa + maj) % 0X1_0000_0000
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = (
g,
f,
e,
((d + tempa) % 0X1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0X1_0000_0000),
)
UpperCamelCase = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase = [
((element + mutated_hash_values[index]) % 0X1_0000_0000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase = ''''''.join([hex(lowerCamelCase_)[2:].zfill(8) for value in self.hashes])
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> int:
return 0XFFFF_FFFF & (value << (3_2 - rotations)) | (value >> rotations)
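# Standalone sketch of the 32-bit right-rotation the method above implements;
# masking the whole result is equivalent for 32-bit inputs.
def rotr32_sketch(value: int, rotations: int) -> int:
    return 0xFFFF_FFFF & ((value >> rotations) | (value << (32 - rotations)))
assert rotr32_sketch(1, 1) == 0x8000_0000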
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> None:
import hashlib
UpperCamelCase = bytes('''Test String''' , '''utf-8''')
self.assertEqual(SHAaaa(lowerCamelCase_).hash , hashlib.shaaaa(lowerCamelCase_).hexdigest())
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''-s''' ,'''--string''' ,dest='''input_string''' ,default='''Hello World!! Welcome to Cryptography''' ,help='''Hash the string''' ,)
parser.add_argument(
'''-f''' ,'''--file''' ,dest='''input_file''' ,help='''Hash contents of a file''' )
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file ,'''rb''' ) as f:
UpperCamelCase = f.read()
else:
UpperCamelCase = bytes(_lowercase ,'''utf-8''' )
print(SHAaaa(_lowercase ).hash )
if __name__ == "__main__":
main() | 34 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
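# A minimal sketch of the lazy-import idea behind _LazyModule (PEP 562 style;
# the helper and names are mine): resolve a submodule only when one of its
# attributes is first requested.
import importlib
def lazy_getattr_sketch(package: str, import_structure: dict):
    def __getattr__(name: str):
        for submodule, names in import_structure.items():
            if name in names:
                return getattr(importlib.import_module(f".{submodule}", package), name)
        raise AttributeError(name)
    return __getattr__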
| 683 | 0 |
from __future__ import annotations
def a ( A__ ) -> int:
'''simple docstring'''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
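# Minimal sanity check for the DP above (grid values are illustrative, not
# from the original file): with moves restricted to right/down, the cheapest
# path costs 1 + 3 + 1 + 1 + 1 = 7.
assert a([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7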
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Dict = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : Tuple = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
def _lowercase ( self , _snake_case , _snake_case=None ) -> Optional[int]:
_UpperCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Optional[Any] = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 683 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__lowercase : Any = logging.getLogger(__name__)
__lowercase : Any = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__lowercase : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__lowerCamelCase : bool = field(
default=snake_case , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__lowerCamelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__lowerCamelCase : bool = field(
default=snake_case , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def snake_case_ ( self ):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__lowerCamelCase : Optional[str] = field(default=snake_case , metadata={'''help''': '''The input training data file (a text file).'''} )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
__lowerCamelCase : bool = field(
default=snake_case , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__lowerCamelCase : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
__lowerCamelCase : Optional[int] = field(
default=snake_case , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
__lowerCamelCase : Optional[int] = field(
default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__lowerCamelCase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__lowerCamelCase : bool = field(
default=snake_case , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def snake_case_ ( self ):
'''simple docstring'''
if self.train_file is not None:
snake_case : Tuple = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
snake_case : Optional[Any] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowercase ( __A : Union[str, Any] , __A : List[Any] ) -> str:
'''simple docstring'''
with open(__A , """r""" , encoding="""utf-8""" ) as f:
        snake_case : int = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
assert len(__A ) == len(__A )
snake_case : Optional[Any] = {c: dataset[c] for c in dataset.column_names}
snake_case : Dict = refs
return Dataset.from_dict(__A )
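# The ref file read above is expected to be JSON lines with one entry per
# dataset row (hence the length assert); each entry marks the sub-token
# positions used for whole-word masking of Chinese text. This description is
# inferred from the surrounding script rather than stated in it.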
def lowercase ( ) -> str:
'''simple docstring'''
snake_case : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __A )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case : Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
snake_case : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
snake_case : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
else:
snake_case : int = {}
if data_args.train_file is not None:
snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
snake_case : Dict = data_args.validation_file
snake_case : str = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
snake_case : str = """text"""
snake_case : List[str] = load_dataset(__A , data_files=__A )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case : Optional[Any] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case : Tuple = AutoConfig.from_pretrained(model_args.config_name , **__A )
elif model_args.model_name_or_path:
snake_case : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path , **__A )
else:
snake_case : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
snake_case : int = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__A )
elif model_args.model_name_or_path:
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__A )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
snake_case : Optional[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
snake_case : Any = AutoModelForMaskedLM.from_config(__A )
model.resize_token_embeddings(len(__A ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
snake_case : str = datasets["""train"""].column_names
else:
snake_case : Dict = datasets["""validation"""].column_names
snake_case : List[str] = """text""" if """text""" in column_names else column_names[0]
snake_case : int = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(__A : Tuple ):
# Remove empty lines
snake_case : int = [line for line in examples["""text"""] if len(__A ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=__A , truncation=__A , max_length=data_args.max_seq_length )
snake_case : Tuple = datasets.map(
__A , batched=__A , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
snake_case : List[str] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
snake_case : Optional[int] = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If ref files are provided, prevent the trainer from removing their columns
snake_case : int = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
snake_case : List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
snake_case : Optional[int] = DataCollatorForWholeWordMask(tokenizer=__A , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case : Optional[Any] = Trainer(
model=__A , args=__A , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case : Any = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
snake_case : Optional[Any] = model_args.model_name_or_path
else:
snake_case : Any = None
snake_case : Dict = trainer.train(resume_from_checkpoint=__A )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case : Optional[Any] = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(__A , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
snake_case : List[str] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case : Union[str, Any] = trainer.evaluate()
snake_case : List[Any] = math.exp(eval_output["""eval_loss"""] )
snake_case : Any = perplexity
snake_case : Any = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(__A , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
return results
def lowercase ( __A : int ) -> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 36 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Any = False
    while is_sorted is False: # keep making passes until a full pass performs no swap
_UpperCamelCase : List[str] = True
for i in range(0 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : int = False
for i in range(1 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : Optional[int] = False
return input_list
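# Quick illustrative check (input values are hypothetical): alternating even-
# and odd-indexed passes repeat until a full pass makes no swap, e.g.
#   odd_even_sort([5, 3, 4, 1, 2]) -> [1, 2, 3, 4, 5]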
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 683 | 0 |
def UpperCamelCase_ ( __a , __a ) -> int:
while b:
a__, a__ : int = b, a % b
return a
def UpperCamelCase_ ( __a , __a ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__a , a % b )
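# Worked example for both variants above: gcd(48, 18) steps through
# 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0, so the result is 6; the recursive
# form follows the same chain of remainders.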
def UpperCamelCase_ ( ) -> Dict:
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 37 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = checkpoint
_UpperCamelCase : int = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
# Only support V1
_UpperCamelCase : Tuple = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 683 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A_ : Optional[int] = None
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A_ : Any = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
A_ : Tuple = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
A_ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = []
lowerCamelCase__ = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ : List[Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = vocab_file
snake_case__ : List[str] = False if not self.vocab_file else True
snake_case__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case__ : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case__ : Any = src_lang if src_lang is not None else """en_XX"""
snake_case__ : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
snake_case__ : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Tuple = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case__ : List[Any] = src_lang
snake_case__ : str = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tgt_lang_id
return inputs
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Union[str, Any] = src_lang
snake_case__ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : int = []
snake_case__ : Dict = [self.eos_token_id, self.cur_lang_code]
snake_case__ : int = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case__ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case__ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = []
snake_case__ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case__ : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
return
snake_case__ : List[str] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
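# Illustrative use of the language-code machinery above (the sentence is made
# up; the checkpoint id is one of the checkpoints referenced in this file):
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief says there is no military solution", return_tensors="pt")
# set_src_lang_special_tokens then ensures inputs end with [eos, src_lang_code].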
| 38 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
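# Sketch of how a processor like the one above is typically used (the class
# and checkpoint names below are hypothetical stand-ins):
#   processor = SomeProcessor.from_pretrained("some/checkpoint")
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# A text-only or image-only call returns just that modality's features; when
# both are given, pixel_values is merged into the text encoding.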
| 683 | 0 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if any(not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(SCREAMING_SNAKE_CASE__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
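# The label lines written above follow the YOLO convention
# "<class_id> <x_center> <y_center> <width> <height>", with all four
# coordinates normalised to [0, 1] relative to the output image.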
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
assert number_char > 1, "The number of character should greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__UpperCAmelCase = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def UpperCamelCase ( snake_case__ : str , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Tuple ) -> Optional[Any]:
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(snake_case__ ) , version.parse(snake_case__ ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[str] = None ) -> None:
UpperCamelCase : Union[str, Any] = F"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(R'^[\w_\-\d]+$' , snake_case__ ):
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = requirement, None, None
else:
UpperCamelCase : Union[str, Any] = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , snake_case__ )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
F""" got {requirement}""" )
UpperCamelCase , UpperCamelCase : Dict = match[0]
UpperCamelCase : Dict = want_full.split(',' ) # there could be multiple requirements
UpperCamelCase : Any = {}
for w in want_range:
UpperCamelCase : Union[str, Any] = re.findall(R'^([\s!=<>]{1,2})(.+)' , snake_case__ )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
F""" but got {requirement}""" )
UpperCamelCase , UpperCamelCase : Any = match[0]
UpperCamelCase : Any = want_ver
if op not in ops:
raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
UpperCamelCase : Optional[int] = '.'.join([str(snake_case__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return
# check if any version is installed
try:
UpperCamelCase : Tuple = importlib.metadata.version(snake_case__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( snake_case__ : List[str] ) -> Optional[Any]:
UpperCamelCase : Any = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(snake_case__ , snake_case__ )
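# Illustrative call (the package pin is hypothetical): this raises
# ImportError unless an installed "tokenizers" distribution satisfies it.
#   require_version("tokenizers>=0.10.1", "Try: pip install -U tokenizers")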
| 40 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : list[tuple[float, float]] ):
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(lowercase__ ) - 1
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree ,lowercase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowercase__ ) ,5 ) == 1
return output_values
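    # Each coefficient above is a Bernstein basis polynomial
    # b(i, n; t) = comb(n, i) * (1 - t)**(n - i) * t**i; the basis is
    # non-negative on [0, 1] and sums to 1, so every curve point is a convex
    # combination of the control points.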
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(lowercase__ )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : float = 0.0_1 ):
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(lowercase__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
lowercase__ ,lowercase__ ,color='''blue''' ,label='''Curve of Degree ''' + str(self.degree ) ,)
plt.scatter(lowercase__ ,lowercase__ ,color='''red''' ,label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 41 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
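# Illustrative invocation (the output values below are hypothetical):
#   $ accelerate env
#   - `Accelerate` version: 0.21.0
#   - Platform: Linux-5.15.0-x86_64-with-glibc2.31
#   - Python version: 3.10.12
#   ...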
| 683 | 0 |
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
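# Worked example: longest_common_substring("abcdef", "xabded") returns "ab".
# dp[i][j] holds the length of the common suffix of text1[:i] and text2[:j];
# "de" also reaches length 2 but never strictly exceeds the earlier "ab".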
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
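# For illustration, the rules above map timm keys to HF keys like so:
#   "stem.conv.weight" -> "bit.embedder.convolution.weight"
#   "head.fc.bias"     -> "classifier.1.bias"
#   "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight"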
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
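# Minimal usage sketch (the generated text is illustrative, not a guaranteed output):
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: How are you?")
#   # -> [{"generated_text": "Wie geht es Ihnen?"}]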
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
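# Note on the task-name fallback in _sanitize_parameters above: a task string
# such as "translation_en_to_fr" splits on "_" into 4 items, so items[1] ("en")
# and items[3] ("fr") recover src_lang/tgt_lang when they are not passed explicitly.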
| 43 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[57] = True  # the chain starting at 58 ends in 89
CHAINS[0] = False  # the chain starting at 1 ends in 1
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_00_00_00) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(True)
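# Worked example of the squared-digit chains cached above:
#   44 -> 32 -> 13 -> 10 -> 1   (chain(44) is False: the chain ends in 1)
#   85 -> 89 -> 145 -> ...      (chain(85) is True: the chain ends in 89)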
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 44 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
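# Worked example: get_week_day(2020, 10, 24) -> "Saturday"
#   century = 20, century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 20, centurian_m = 8, dooms_day = (1 + 8 + 2 + 2) % 7 = 6
#   2020 is a leap year, so day_anchor = DOOMSDAY_LEAP[9] = 3
#   week_day = (6 + 24 - 3) % 7 = 6 -> "Saturday"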
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # e.g. with 3 up blocks the factor is 2**3 = 8: a request for 30 samples
            # is padded up to 32 here and trimmed back to 30 after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                """ process.""" )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio) | 45 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        # Minimal stand-in so this module can be imported without PIL.
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 683 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
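# Sanity check of the shape math in create_and_check_model with the defaults
# above: (32 // 2) ** 2 = 256 patches, depths [1, 2, 1] give a 4 ** 2 = 16x
# sequence reduction, so expected_seq_len = 256 // 16 = 16 and
# expected_dim = 16 * 2 ** 2 = 64.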
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.39_47, -0.43_06, 0.00_26]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 46 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 47 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_a_a = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_a, wi_a_a)
    else:
        wi = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]

    wo = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables['''target'''])
    old = {'''/'''.join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''', split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, '''encoder''', '''pre_attention_layer_norm''')
        k, o, q, v = tax_attention_lookup(old, i, '''encoder''', '''attention''')
        new[f'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[f'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, '''encoder''', '''pre_mlp_layer_norm''')
        wi, wo = tax_mlp_lookup(old, i, '''encoder''', split_mlp_wi)
        new[f'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[f'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'''encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
                old, i, '''encoder''').T

    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']

    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old, 0, '''encoder''').T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old, 0, '''decoder''').T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, '''decoder''', '''pre_self_attention_layer_norm''')
            k, o, q, v = tax_attention_lookup(old, i, '''decoder''', '''self_attention''')
            new[f'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, '''decoder''', '''pre_cross_attention_layer_norm''')
            k, o, q, v = tax_attention_lookup(old, i, '''decoder''', '''encoder_decoder_attention''')
            new[f'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, '''decoder''', '''pre_mlp_layer_norm''')
            wi, wo = tax_mlp_lookup(old, i, '''decoder''', split_mlp_wi)
            new[f'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[f'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'''decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
                    old, i, '''decoder''').T

        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False,
):
    config = MTaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Whether the model is an encoder-only model.""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scalable attention (UMT5 models).""",
        default=False,
    )
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 683 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name: str):
    '''simple docstring'''
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "detr."
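    # DETR uses a hidden size of 256, so the fused in_proj matrix has 3 * 256 = 768 rows:
    # rows [0:256] hold the query projection, [256:512] the key and [512:768] the value.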
# first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
# rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
# query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
# finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 48 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
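# Sieve of Eratosthenes: strike every composite multiple of each prime, so that only
# the primes below NUM_PRIMES remain in the set.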
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
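# The optional, dependency-gated symbols below are only added to _import_structure when
# the corresponding backend (vision / torch) is importable; otherwise they stay hidden.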
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = """bart"""
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
if LOAD_DENSE_INDEX:
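        # The memmap holds precomputed 128-d dense retriever embeddings for every wiki40b
        # passage; an exact inner-product FAISS index is then built over them on the GPU.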
        res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
    show_passages = show_type == """Show full text of passages"""
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
    retriever_info = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
    index_type = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
    wiki_source = """wiki40b"""
    index_type = """dense"""
sampled = """beam"""
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("""Generation options""")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    min_len = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
            wiki_url = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = """[{}]({})""".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, """pad_token_id""", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
# Need to add the past_keys
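        # For ONNX export with use_past, each layer needs a dummy (key, value) tensor pair of
        # shape (batch, num_heads, past_seq_len, head_dim); the attention mask is widened
        # further below so it also covers those past positions.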
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 50 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    """simple docstring"""
    def __init__( self , value = None ) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    """simple docstring"""
    def __init__( self , root = None ) -> None:
        self.root = root
    def __str__( self ) -> str:
        return str(self.root )
    def __reassign_nodes( self , node , new_children ) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ) -> bool:
        return self.root is None
    def __insert( self , value ) -> None:
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ) -> None:
        for value in values:
            self.__insert(value )
    def search( self , value ) -> Node | None:
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node = None ) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root

        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node = None ) -> Node | None:
        if node is None:
            node = self.root
        if self.root is None:
            return None

        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ) -> None:
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node ) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr , node ) -> None:
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
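    # An inorder traversal of a binary search tree visits values in sorted order, so the
    # k-th smallest element is simply index k - 1 of that traversal.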
    def find_kth_smallest( self , k , node ) -> int:
        arr: list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node ) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree( ) -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
        t.remove(i )
        print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
'''simple docstring'''
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
for letter in name:
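            # ord("A") == 65, so subtracting 64 maps "A" -> 1, "B" -> 2, ..., "Z" -> 26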
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0  # reset for the next name
return total_score
if __name__ == "__main__":
print(solution())
| 51 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
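# The two id lists below are the default non-speech token ids (punctuation, sound-effect
# markers, etc.) that Whisper generation suppresses; there is one variant per tokenizer
# family (multilingual vs. English-only checkpoints).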
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ) -> Any:
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs
    @property
    def atol_for_validation( self ) -> float:
        return 1E-3
| 683 | 0 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict):
        ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent , str):
ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def create_model_card(args, model_name):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
    if hasattr(args , '''local_rank''') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args , '''hub_token''') else None
    repo_name = get_full_repo_name(model_name , token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='''en''',
            license='''apache-2.0''',
            library_name='''diffusers''',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args , '''dataset_name''') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''') else None,
        adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''') else None,
        ema_power=args.ema_power if hasattr(args , '''ema_power''') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''') else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir , '''README.md''')
    model_card.save(card_path)
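# extract_commit_hash below recovers the commit hash from a resolved cache path of the
# form ".../snapshots/<commit_hash>/<file>", validating it against REGEX_COMMIT_HASH.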
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'''snapshots/([^/]+)/''' , resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True , exist_ok=True)
            os.replace(old_blob_path , new_blob_path)
            try:
                os.symlink(new_blob_path , old_blob_path)
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
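# Standalone sketch (temp files only; paths are hypothetical and symlink
# creation may need extra privileges on Windows) of the move-then-symlink
# trick that move_cache() relies on: each blob is moved into the new cache
# layout while the old path keeps resolving through a symlink.
def _move_then_symlink_demo() -> None:
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        old_blob = Path(tmp) / "old" / "blob"
        new_blob = Path(tmp) / "new" / "blob"
        old_blob.parent.mkdir(parents=True)
        old_blob.write_text("weights")
        new_blob.parent.mkdir(parents=True)
        os.replace(old_blob, new_blob)  # move the file into the new layout
        os.symlink(new_blob, old_blob)  # keep the old location usable
        assert old_blob.read_text() == "weights"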
def _add_variant(weights_name: str , variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('''.''')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits)
    return weights_name
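# Quick check of the variant splicing above (assumed file name): the variant
# label lands just before the final extension.
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"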
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name)
            return model_file
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('''0.20.0''')
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , FutureWarning , )
return model_file
except: # noqa: E722
                warnings.warn(
                    F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision)}' so that the correct variant file can be added.""" , FutureWarning , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""") | 52 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"""{prefix}.{param_name}"""] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"""{prefix}.embeddings.{w}.weight"""
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"""{prefix}.embeddings.LayerNorm.{w}"""
            compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                    compressed_sd[f"""{prefix}.h.{std_idx}.{layer}.{w}"""] = state_dict[
                        f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
                    ]
            compressed_sd[f"""{prefix}.h.{std_idx}.attn.bias"""] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                    compressed_sd[f"""{prefix}.encoder.layer.{std_idx}.{layer}.{w}"""] = state_dict[
                        f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
                    ]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"""{layer}"""] = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
                compressed_sd[f"""lm_head.dense.{w}"""] = state_dict[f"""lm_head.dense.{w}"""]
                compressed_sd[f"""lm_head.layer_norm.{w}"""] = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
            compressed_sd[f"""{prefix}.ln_f.{w}"""] = state_dict[f"""{prefix}.ln_f.{w}"""]
        compressed_sd["""lm_head.weight"""] = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
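    # Sketch of the teacher-to-student layer mapping implemented above (assumed
    # 12-layer teacher distilled into a 6-layer student): teacher layers
    # [0, 2, 4, 7, 9, 11] are copied into student layers [0..5] in order.
    _layer_map = {teacher: student for student, teacher in enumerate([0, 2, 4, 7, 9, 11])}
    assert _layer_map == {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}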
| 683 | 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 53 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
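# Minimal standalone sketch (pure Python, hypothetical processors) of what
# FlaxLogitsProcessorList composition amounts to in the tests above: each
# processor maps (input_ids, scores, cur_len) -> scores, applied in order.
def _compose_processors(processors, input_ids, scores, cur_len):
    for processor in processors:
        scores = processor(input_ids, scores, cur_len=cur_len)
    return scores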
| 683 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
def __init__( self: List[str] , _lowerCAmelCase: int = 3 , _lowerCAmelCase: int = 3 , _lowerCAmelCase: Tuple[str] = ("DownEncoderBlock2D",) , _lowerCAmelCase: Tuple[str] = ("UpDecoderBlock2D",) , _lowerCAmelCase: Tuple[int] = (64,) , _lowerCAmelCase: int = 1 , _lowerCAmelCase: str = "silu" , _lowerCAmelCase: int = 4 , _lowerCAmelCase: int = 32 , _lowerCAmelCase: int = 32 , _lowerCAmelCase: float = 0.1_82_15 , ) -> Tuple:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ =Encoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , down_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , double_z=_lowerCAmelCase , )
# pass init params to Decoder
UpperCAmelCase_ =Decoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , up_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , act_fn=_lowerCAmelCase , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
UpperCAmelCase_ =False
UpperCAmelCase_ =False
# only relevant if vae tiling is enabled
UpperCAmelCase_ =self.config.sample_size
UpperCAmelCase_ =(
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ =int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ =0.25
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[int]=False ) -> Tuple:
'''simple docstring'''
if isinstance(_lowerCAmelCase , (Encoder, Decoder) ):
UpperCAmelCase_ =value
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: bool = True ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =use_tiling
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
self.enable_tiling(_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =True
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase__ ( self: Any ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCAmelCase_ ={}
def fn_recursive_add_processors(_lowerCAmelCase: str , _lowerCAmelCase: torch.nn.Module , _lowerCAmelCase: Dict[str, AttentionProcessor] ):
if hasattr(_lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , _lowerCAmelCase , _lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return processors
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =len(self.attn_processors.keys() )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(_lowerCAmelCase )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(_lowerCAmelCase: str , _lowerCAmelCase: torch.nn.Module , _lowerCAmelCase: Tuple ):
if hasattr(_lowerCAmelCase , "set_processor" ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
module.set_processor(_lowerCAmelCase )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , _lowerCAmelCase , _lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict ) -> int:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ =[self.encoder(_lowerCAmelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ =torch.cat(_lowerCAmelCase )
else:
UpperCAmelCase_ =self.encoder(_lowerCAmelCase )
UpperCAmelCase_ =self.quant_conv(_lowerCAmelCase )
UpperCAmelCase_ =DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
UpperCAmelCase_ =self.post_quant_conv(_lowerCAmelCase )
UpperCAmelCase_ =self.decoder(_lowerCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
@apply_forward_hook
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ =[self._decode(_lowerCAmelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ =torch.cat(_lowerCAmelCase )
else:
UpperCAmelCase_ =self._decode(_lowerCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =min(a.shape[2] , b.shape[2] , _lowerCAmelCase )
for y in range(_lowerCAmelCase ):
UpperCAmelCase_ =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =min(a.shape[3] , b.shape[3] , _lowerCAmelCase )
for x in range(_lowerCAmelCase ):
UpperCAmelCase_ =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
UpperCAmelCase_ =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ =int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ =self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ =[]
for i in range(0 , x.shape[2] , _lowerCAmelCase ):
UpperCAmelCase_ =[]
for j in range(0 , x.shape[3] , _lowerCAmelCase ):
UpperCAmelCase_ =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ =self.encoder(_lowerCAmelCase )
UpperCAmelCase_ =self.quant_conv(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
UpperCAmelCase_ =[]
for i, row in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =[]
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ =self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
UpperCAmelCase_ =self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=2 )
UpperCAmelCase_ =DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase_ =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ =int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ =self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ =[]
for i in range(0 , z.shape[2] , _lowerCAmelCase ):
UpperCAmelCase_ =[]
for j in range(0 , z.shape[3] , _lowerCAmelCase ):
UpperCAmelCase_ =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ =self.post_quant_conv(_lowerCAmelCase )
UpperCAmelCase_ =self.decoder(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
UpperCAmelCase_ =[]
for i, row in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =[]
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ =self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
UpperCAmelCase_ =self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase_ =sample
UpperCAmelCase_ =self.encode(_lowerCAmelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ =posterior.sample(generator=_lowerCAmelCase )
else:
UpperCAmelCase_ =posterior.mode()
UpperCAmelCase_ =self.decode(_lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
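# Standalone sketch of the vertical seam blending used by blend_v above
# (assumed 4-D tensors laid out as [batch, channel, height, width]): rows in
# the overlap region are linearly interpolated from tile `a` into tile `b`.
def _blend_v_sketch(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b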
| 54 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
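def _demo_get_dataset_split_names():
    # Hedged usage sketch outside of pytest (network access required; the
    # expected value is taken from the parametrized cases above).
    assert get_dataset_split_names('''squad''' ,config_name='''plain_text''' ) == ['''train''', '''validation''']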
| 683 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = BertJapaneseTokenizer
snake_case_ = False
snake_case_ = True
def UpperCamelCase_ ( self : Any ):
super().setUp()
__A = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
__A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : Dict ,A : Optional[Any] ):
__A = "こんにちは、世界。 \nこんばんは、世界。"
__A = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def UpperCamelCase_ ( self : int ,A : Tuple ):
__A , __A = self.get_input_output_texts(A )
__A = tokenizer.encode(A ,add_special_tokens=A )
__A = tokenizer.decode(A ,clean_up_tokenization_spaces=A )
return text, ids
def UpperCamelCase_ ( self : Any ):
pass # TODO add if relevant
def UpperCamelCase_ ( self : Tuple ):
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[Any] ):
pass # TODO add if relevant
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.tokenizer_class(self.vocab_file )
__A = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCamelCase_ ( self : List[str] ):
__A = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="mecab" )
self.assertIsNotNone(A )
__A = "こんにちは、世界。\nこんばんは、世界。"
__A = tokenizer.tokenize(A )
self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
__A = os.path.join(self.tmpdirname ,"tokenizer.bin" )
with open(A ,"wb" ) as handle:
pickle.dump(A ,A )
with open(A ,"rb" ) as handle:
__A = pickle.load(A )
__A = tokenizer_new.tokenize(A )
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,)
def UpperCamelCase_ ( self : Union[str, Any] ):
try:
__A = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,)
def UpperCamelCase_ ( self : Dict ):
try:
__A = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,)
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = MecabTokenizer(do_lower_case=A ,mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] ,)
def UpperCamelCase_ ( self : Union[str, Any] ):
try:
__A = MecabTokenizer(
do_lower_case=A ,normalize_text=A ,mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] ,)
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = MecabTokenizer(normalize_text=A ,mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] ,)
@require_sudachi
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="sudachi" )
self.assertIsNotNone(A )
__A = "こんにちは、世界。\nこんばんは、世界。"
__A = tokenizer.tokenize(A )
self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
__A = os.path.join(self.tmpdirname ,"tokenizer.bin" )
with open(A ,"wb" ) as handle:
pickle.dump(A ,A )
with open(A ,"rb" ) as handle:
__A = pickle.load(A )
__A = tokenizer_new.tokenize(A )
self.assertListEqual(A ,A )
@require_sudachi
def UpperCamelCase_ ( self : int ):
__A = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] ,)
@require_sudachi
def UpperCamelCase_ ( self : str ):
__A = SudachiTokenizer(sudachi_dict_type="core" ,sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) ,["外国", "人", "参政", "権"] )
@require_sudachi
def UpperCamelCase_ ( self : Optional[Any] ):
__A = SudachiTokenizer(sudachi_dict_type="core" ,sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) ,["外国人", "参政権"] )
@require_sudachi
def UpperCamelCase_ ( self : int ):
__A = SudachiTokenizer(sudachi_dict_type="core" ,sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) ,["外国人参政権"] )
@require_sudachi
def UpperCamelCase_ ( self : Tuple ):
__A = SudachiTokenizer(do_lower_case=A ,sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,[" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] ,)
@require_sudachi
def UpperCamelCase_ ( self : List[Any] ):
__A = SudachiTokenizer(normalize_text=A ,sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] ,)
@require_sudachi
def UpperCamelCase_ ( self : Optional[Any] ):
__A = SudachiTokenizer(trim_whitespace=A ,sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] ,)
@require_jumanpp
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="jumanpp" )
self.assertIsNotNone(A )
__A = "こんにちは、世界。\nこんばんは、世界。"
__A = tokenizer.tokenize(A )
self.assertListEqual(A ,["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
__A = os.path.join(self.tmpdirname ,"tokenizer.bin" )
with open(A ,"wb" ) as handle:
pickle.dump(A ,A )
with open(A ,"rb" ) as handle:
__A = pickle.load(A )
__A = tokenizer_new.tokenize(A )
self.assertListEqual(A ,A )
@require_jumanpp
def UpperCamelCase_ ( self : Any ):
__A = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] ,)
@require_jumanpp
def UpperCamelCase_ ( self : Any ):
__A = JumanppTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] ,)
@require_jumanpp
def UpperCamelCase_ ( self : List[str] ):
__A = JumanppTokenizer(normalize_text=A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] ,)
@require_jumanpp
def UpperCamelCase_ ( self : Tuple ):
__A = JumanppTokenizer(trim_whitespace=A )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) ,["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] ,)
@require_jumanpp
def UpperCamelCase_ ( self : Tuple ):
__A = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) ,["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] ,)
def UpperCamelCase_ ( self : List[str] ):
__A = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
__A = {}
for i, token in enumerate(A ):
__A = i
__A = WordpieceTokenizer(vocab=A ,unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) ,[] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) ,["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) ,["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) ,["こん", "##ばんは", "[UNK]", "こんにちは"] )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
__A = tokenizer.subword_tokenizer
__A = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(A ,["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
__A = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(A ,["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
__A = tokenizer.encode("ありがとう。" ,add_special_tokens=A )
__A = tokenizer.encode("どういたしまして。" ,add_special_tokens=A )
__A = tokenizer.build_inputs_with_special_tokens(A )
__A = tokenizer.build_inputs_with_special_tokens(A ,A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = BertJapaneseTokenizer
snake_case_ = False
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
__A = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
__A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : int ,**A : str ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type="character" ,**A )
def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ):
__A = "こんにちは、世界。 \nこんばんは、世界。"
__A = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def UpperCamelCase_ ( self : str ):
pass # TODO add if relevant
def UpperCamelCase_ ( self : List[str] ):
pass # TODO add if relevant
def UpperCamelCase_ ( self : Any ):
__A = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type="character" )
__A = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
A ,["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
__A = {}
for i, token in enumerate(A ):
__A = i
__A = CharacterTokenizer(vocab=A ,unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) ,[] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) ,["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) ,["こ", "ん", "に", "ち", "[UNK]"] )
def UpperCamelCase_ ( self : str ):
__A = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
__A = tokenizer.encode("ありがとう。" ,add_special_tokens=A )
__A = tokenizer.encode("どういたしまして。" ,add_special_tokens=A )
__A = tokenizer.build_inputs_with_special_tokens(A )
__A = tokenizer.build_inputs_with_special_tokens(A ,A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = "cl-tohoku/bert-base-japanese"
__A = AutoTokenizer.from_pretrained(A )
self.assertIsInstance(A ,A )
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
__A = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" ,level="WARNING" ) as cm:
BertTokenizer.from_pretrained(A )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
__A = "bert-base-cased"
with self.assertLogs("transformers" ,level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(A )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 55 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
        _UpperCamelCase : Any = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
        _UpperCamelCase : Optional[Any] = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        _UpperCamelCase : int = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
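# Hedged end-to-end sketch (added), mirroring the slow test above; the checkpoint
# name comes from the test itself, the rest is standard `diffusers` usage.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
output = pipe(generator=torch.Generator().manual_seed(42))
mel_image = output.images[0]  # mel spectrogram rendered as a PIL image
waveform = output.audios[0]   # ndarray of shape (1, num_samples)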
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_a : List[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_a : int = typing.Union[np.floataa, int, float] # noqa: UP007
def _a (lowercase__ : Vector , lowercase__ : Vector ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(lowercase__ ) - np.asarray(lowercase__ )) ** 2 ) )
def _a (lowercase__ : Vector , lowercase__ : Vector ) -> VectorOut:
"""simple docstring"""
    return sum((va - vb) ** 2 for va, vb in zip(lowercase__ , lowercase__ ) ) ** (1 / 2)
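# Worked example (added): for the points (0, 0) and (3, 4) both variants reduce to
# sqrt(3**2 + 4**2) == 5.0; the names below follow the timeit strings in the benchmark:
#     euclidean_distance([0, 0], [3, 4])        -> 5.0  (NumPy variant)
#     euclidean_distance_no_np([0, 0], [3, 4])  -> 5.0  (pure-Python variant)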
if __name__ == "__main__":
def _a () -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0_0_0_0 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
| 56 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
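# (added) Usage note: with the _LazyModule shim above, `from transformers.models.longt5
# import LongT5Model` resolves lazily; the heavy torch/flax submodules are imported only
# on first attribute access, which keeps `import transformers` cheap.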
| 683 | 0 |
import requests
from bsa import BeautifulSoup
def snake_case (UpperCAmelCase__ = "AAPL" ) -> str:
UpperCamelCase_: List[Any] = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
UpperCamelCase_: List[Any] = BeautifulSoup(requests.get(UpperCAmelCase__ ).text , 'html.parser' )
UpperCamelCase_: List[Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 57 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Dict = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : Tuple = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
def _lowercase ( self , _snake_case , _snake_case=None ) -> Optional[int]:
_UpperCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Optional[Any] = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
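# Hedged usage sketch (added): the standard `transformers` fast-tokenizer flow; the
# checkpoint name is illustrative, the API is the class defined above.
from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoding = tokenizer("Hello world")  # -> {"input_ids": [...], "attention_mask": [...]}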
| 683 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.Sequential(
nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , )
snake_case_ : Any = nn.Embedding(_lowercase , _lowercase )
snake_case_ : Union[str, Any] = False
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase )
snake_case_ : Tuple = nn.ModuleList()
for lyr_num in range(_lowercase ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
self.decoders.append(_lowercase )
snake_case_ : List[Any] = TaLayerNorm(_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase )
snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
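        # (added) Outer product of the two padding masks: (b, q) x (b, k) -> (b, q, k),
        # then unsqueeze to (b, 1, q, k) so the mask broadcasts across attention heads.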
snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Dict = torch.broadcast_to(
torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
snake_case_ : Tuple = self.position_encoding(_lowercase )
snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase )
inputs += position_encodings
snake_case_ : List[Any] = self.dropout(_lowercase )
# decoder: No padding present.
snake_case_ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
snake_case_ : int = lyr(
_lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0]
snake_case_ : int = self.decoder_norm(_lowercase )
snake_case_ : Union[str, Any] = self.post_dropout(_lowercase )
snake_case_ : int = self.spec_out(_lowercase )
return spec_out
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.layer[0](
_lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , )
if encoder_hidden_states is not None:
snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
snake_case_ : str = self.layer[1](
_lowercase , key_value_states=_lowercase , attention_mask=_lowercase , )
# Apply Film Conditional Feed Forward layer
snake_case_ : Any = self.layer[-1](_lowercase , _lowercase )
return (hidden_states,)
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Any = TaLayerNorm(_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : List[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase )
# Self-attention block
snake_case_ : List[Any] = self.attention(_lowercase )
snake_case_ : List[str] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase )
snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Optional[Any] = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
snake_case_ : Optional[Any] = self.attention(
_lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , )
snake_case_ : Any = hidden_states + self.dropout(_lowercase )
return layer_output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase )
snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase )
snake_case_ : Tuple = nn.Dropout(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.layer_norm(_lowercase )
if conditioning_emb is not None:
snake_case_ : Optional[int] = self.film(_lowercase , _lowercase )
snake_case_ : int = self.DenseReluDense(_lowercase )
snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
snake_case_ : int = nn.Dropout(_lowercase )
snake_case_ : Optional[int] = NewGELUActivation()
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = self.act(self.wi_a(_lowercase ) )
snake_case_ : Dict = self.wi_a(_lowercase )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : List[Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.wo(_lowercase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1E-6 ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) )
snake_case_ : int = eps
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
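        # (added) T5-style RMSNorm: scale by the root-mean-square of the activations;
        # unlike standard LayerNorm there is no mean subtraction and no bias.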
snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase )
snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
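        # (added) Tanh approximation of GELU (as used in GPT-2 and the original BERT code):
        # 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))).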
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) ))
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.scale_bias(_lowercase )
snake_case_ , snake_case_ : Any = torch.chunk(_lowercase , 2 , -1 )
snake_case_ : Optional[Any] = x * (1 + scale) + shift
return x
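# Hedged numeric sketch (added) of the FiLM modulation implemented above: a linear
# projection of the conditioning embedding is split into (scale, shift) and the
# features are transformed as x * (1 + scale) + shift. All sizes are illustrative.
import torch

cond = torch.randn(1, 1, 512)                        # conditioning embedding (d_model * 4)
to_scale_shift = torch.nn.Linear(512, 2 * 128, bias=False)
scale, shift = torch.chunk(to_scale_shift(cond), 2, dim=-1)
x = torch.randn(1, 10, 128)                          # (batch, seq, features)
x = x * (1 + scale) + shift                          # FiLM: broadcasts over the sequence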
| 58 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
_UpperCamelCase : List[str] = True
for i in range(0 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : int = False
for i in range(1 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : Optional[int] = False
return input_list
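# Worked trace (added) of the intended odd-even (brick) sort above, e.g. on [3, 1, 7, 2]:
# even pass swaps (3, 1) and (7, 2) -> [1, 3, 2, 7]; odd pass swaps (3, 2) -> [1, 2, 3, 7];
# one final swap-free sweep then confirms the list is sorted.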
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 683 | 0 |
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCAmelCase_ ( __a ) -> list[tuple[int, int]]:
"""simple docstring"""
lowerCamelCase__: Dict =0
lowerCamelCase__: Union[str, Any] =len(__a ) # No of vertices in graph
lowerCamelCase__: Optional[int] =[0] * n
lowerCamelCase__: List[str] =[False] * n
def dfs(__a , __a , __a , __a ):
lowerCamelCase__: Dict =True
lowerCamelCase__: List[Any] =id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__a , __a , __a , id_ )
lowerCamelCase__: str =min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCamelCase__: str =min(low[at] , low[to] )
lowerCamelCase__: list[tuple[int, int]] =[]
for i in range(__a ):
if not visited[i]:
dfs(__a , -1 , __a , id_ )
return bridges
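# Worked example (added): on graph index 3 above every edge lies on a cycle, so the
# bridge finder returns []; on graph index 1 (a forest) every edge is a bridge, e.g.
# (0, 6) disconnects vertex 0 when removed. Illustrative call, with hypothetical names
# for the two functions defined above: compute_bridges(get_demo_graph(1)).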
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = checkpoint
_UpperCamelCase : int = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
# Only support V1
_UpperCamelCase : Tuple = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
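# Hedged invocation example (added); both flags match the argparse definitions above,
# only the script filename is an assumption:
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers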
| 683 | 0 |