| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
UpperCamelCase__ :Optional[Any] = get_logger(__name__)
class A( enum.Enum ):
"""simple docstring"""
A = "all_checks"
A = "basic_checks"
A = "no_checks"
class A( __lowercase ):
"""simple docstring"""
pass
class A( __lowercase ):
"""simple docstring"""
pass
class A( __lowercase ):
"""simple docstring"""
pass
class A( __lowercase ):
"""simple docstring"""
pass
def A_ ( snake_case__ , snake_case__ , snake_case__=None ) -> Tuple:
if expected_checksums is None:
logger.info('''Unable to verify checksums.''' )
return
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(snake_case__ ) - set(snake_case__ ) ) )
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(snake_case__ ) - set(snake_case__ ) ) )
_UpperCamelCase :Dict = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_UpperCamelCase :Optional[Any] = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(snake_case__ ) > 0:
raise NonMatchingChecksumError(
f"Checksums didn\'t match{for_verification_name}:\n"
f"{bad_urls}\n"
'''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
logger.info('''All the checksums matched successfully''' + for_verification_name )
class A( __lowercase ):
"""simple docstring"""
pass
class A( __lowercase ):
"""simple docstring"""
pass
class A( __lowercase ):
"""simple docstring"""
pass
class A( __lowercase ):
"""simple docstring"""
pass
def A_ ( snake_case__ , snake_case__ ) -> Any:
if expected_splits is None:
logger.info('''Unable to verify splits sizes.''' )
return
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise ExpectedMoreSplits(str(set(snake_case__ ) - set(snake_case__ ) ) )
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise UnexpectedSplits(str(set(snake_case__ ) - set(snake_case__ ) ) )
_UpperCamelCase :Any = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(snake_case__ ) > 0:
raise NonMatchingSplitsSizesError(str(snake_case__ ) )
logger.info('''All the splits matched successfully.''' )
def A_ ( snake_case__ , snake_case__ = True ) -> dict:
if record_checksum:
_UpperCamelCase :List[Any] = shaaaa()
with open(snake_case__ , '''rb''' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B'''''' ):
m.update(snake_case__ )
_UpperCamelCase :str = m.hexdigest()
else:
_UpperCamelCase :Tuple = None
return {"num_bytes": os.path.getsize(snake_case__ ), "checksum": checksum}
def A_ ( snake_case__ ) -> Dict:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
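
# Usage sketch (added, not part of the original module): `get_size_checksum_dict`
# produces the per-file records that `verify_checksums` compares. The path and
# URL below are invented for illustration.
def _verify_demo(path="data.txt", url="https://example.com/data.txt"):
    recorded = {url: get_size_checksum_dict(path)}
    expected = dict(recorded)  # normally read from the dataset's stored metadata
    verify_checksums(expected, recorded, verification_name="demo files")  # logs success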
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required fields listed here
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        raise ValueError(f"Invalid search term: {invalid_search_terms}")
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
def A_ ( lowercase_ , lowercase_ ) ->str:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(lowercase_ , lowercase_ ) or not number >= 1:
raise ValueError(
'starting number must be\n and integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
SCREAMING_SNAKE_CASE = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowercase_ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
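
# Example run (added for illustration): each number becomes itself, "Fizz",
# "Buzz", or "FizzBuzz", with a trailing space after every entry:
#
#     >>> fizz_buzz(1, 15)
#     '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '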
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class TestIsPrime(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
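
# Editor's cross-check (added, not part of the original suite): every prime
# greater than 3 is of the form 6k +/- 1, so the stride-6 loop above is safe.
# A naive trial division must therefore agree on every small input:
def _is_prime_naive(n: int) -> bool:  # hypothetical helper, for comparison only
    return n >= 2 and all(n % d != 0 for d in range(2, int(math.sqrt(n)) + 1))


assert all(is_prime(n) == _is_prime_naive(n) for n in range(2, 1000))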
if __name__ == "__main__":
unittest.main()
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
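
# To render the scene above (added note; assumes a standard Manim Community
# install and that the script is saved as big_model_inference.py):
#
#     manim -pql big_model_inference.py Stage2
#
# `-p` previews the result, `-ql` renders at low quality for quick iteration.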
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=0.9_99 , __UpperCamelCase="cosine" , ) -> str:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCAmelCase_ : str = []
for i in range(__UpperCamelCase ):
lowerCAmelCase_ : Tuple = i / num_diffusion_timesteps
lowerCAmelCase_ : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) , __UpperCamelCase ) )
return torch.tensor(__UpperCamelCase , dtype=torch.floataa )
class __lowerCamelCase ( A__ , A__ ):
'''simple docstring'''
a_ : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
a_ : List[Any] = 2
@register_to_config
def __init__( self : Any , a_ : int = 10_00 , a_ : float = 0.00085 , a_ : float = 0.012 , a_ : str = "linear" , a_ : Optional[Union[np.ndarray, List[float]]] = None , a_ : str = "epsilon" , a_ : str = "linspace" , a_ : int = 0 , ):
if trained_betas is not None:
lowerCAmelCase_ : Optional[Any] = torch.tensor(a_ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase_ : int = torch.linspace(a_ , a_ , a_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase_ : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase_ : Tuple = betas_for_alpha_bar(a_ )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
lowerCAmelCase_ : List[Any] = 1.0 - self.betas
lowerCAmelCase_ : int = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a_ , a_ , a_ )
def lowerCamelCase ( self : List[str] , a_ : int , a_ : Any=None ):
if schedule_timesteps is None:
lowerCAmelCase_ : str = self.timesteps
lowerCAmelCase_ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase_ : List[str] = 1 if len(a_ ) > 1 else 0
else:
lowerCAmelCase_ : Any = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
lowerCAmelCase_ : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase ( self : Dict ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase ( self : Dict , a_ : torch.FloatTensor , a_ : Union[float, torch.FloatTensor] , ):
lowerCAmelCase_ : Optional[Any] = self.index_for_timestep(a_ )
if self.state_in_first_order:
lowerCAmelCase_ : Any = self.sigmas[step_index]
else:
lowerCAmelCase_ : Optional[Any] = self.sigmas_interpol[step_index]
lowerCAmelCase_ : str = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase ( self : Tuple , a_ : int , a_ : Union[str, torch.device] = None , a_ : Optional[int] = None , ):
lowerCAmelCase_ : List[str] = num_inference_steps
lowerCAmelCase_ : List[str] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase_ : int = np.linspace(0 , num_train_timesteps - 1 , a_ , dtype=a_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase_ : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase_ : Any = (np.arange(0 , a_ ) * step_ratio).round()[::-1].copy().astype(a_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase_ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase_ : str = (np.arange(a_ , 0 , -step_ratio )).round().copy().astype(a_ )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase_ : Any = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase_ : Any = torch.from_numpy(np.log(a_ ) ).to(a_ )
lowerCAmelCase_ : str = np.interp(a_ , np.arange(0 , len(a_ ) ) , a_ )
lowerCAmelCase_ : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase_ : Optional[int] = torch.from_numpy(a_ ).to(device=a_ )
# interpolate sigmas
lowerCAmelCase_ : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
lowerCAmelCase_ : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase_ : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(a_ ).startswith("mps" ):
# mps does not support float64
lowerCAmelCase_ : Tuple = torch.from_numpy(a_ ).to(a_ , dtype=torch.floataa )
else:
lowerCAmelCase_ : str = torch.from_numpy(a_ ).to(a_ )
# interpolate timesteps
lowerCAmelCase_ : Dict = self.sigma_to_t(a_ ).to(a_ , dtype=timesteps.dtype )
lowerCAmelCase_ : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
lowerCAmelCase_ : Optional[int] = torch.cat([timesteps[:1], interleaved_timesteps] )
lowerCAmelCase_ : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase_ : Optional[int] = defaultdict(a_ )
def lowerCamelCase ( self : Union[str, Any] , a_ : int ):
# get log sigma
lowerCAmelCase_ : Tuple = sigma.log()
# get distribution
lowerCAmelCase_ : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowerCAmelCase_ : List[str] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowerCAmelCase_ : Union[str, Any] = low_idx + 1
lowerCAmelCase_ : Dict = self.log_sigmas[low_idx]
lowerCAmelCase_ : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase_ : List[Any] = (low - log_sigma) / (low - high)
lowerCAmelCase_ : Dict = w.clamp(0 , 1 )
# transform interpolation to time range
lowerCAmelCase_ : Tuple = (1 - w) * low_idx + w * high_idx
lowerCAmelCase_ : Union[str, Any] = t.view(sigma.shape )
return t
@property
def lowerCamelCase ( self : str ):
return self.sample is None
def lowerCamelCase ( self : Any , a_ : Union[torch.FloatTensor, np.ndarray] , a_ : Union[float, torch.FloatTensor] , a_ : Union[torch.FloatTensor, np.ndarray] , a_ : bool = True , ):
lowerCAmelCase_ : Dict = self.index_for_timestep(a_ )
# advance index counter by 1
lowerCAmelCase_ : Optional[Any] = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase_ : List[Any] = self.sigmas[step_index]
lowerCAmelCase_ : Tuple = self.sigmas_interpol[step_index + 1]
lowerCAmelCase_ : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowerCAmelCase_ : int = self.sigmas[step_index - 1]
lowerCAmelCase_ : Any = self.sigmas_interpol[step_index]
lowerCAmelCase_ : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase_ : str = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCAmelCase_ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase_ : Any = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCAmelCase_ : Dict = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase_ : Tuple = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase_ : str = sigma_interpol - sigma_hat
# store for 2nd order step
lowerCAmelCase_ : List[str] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowerCAmelCase_ : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowerCAmelCase_ : Optional[Any] = sigma_next - sigma_hat
lowerCAmelCase_ : Tuple = self.sample
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : List[str] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a_ )
def lowerCamelCase ( self : List[str] , a_ : torch.FloatTensor , a_ : torch.FloatTensor , a_ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase_ : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a_ ):
# mps does not support float64
lowerCAmelCase_ : str = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase_ : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase_ : Optional[Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase_ : Optional[int] = timesteps.to(original_samples.device )
lowerCAmelCase_ : Optional[int] = [self.index_for_timestep(a_ , a_ ) for t in timesteps]
lowerCAmelCase_ : str = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase_ : Dict = sigma.unsqueeze(-1 )
lowerCAmelCase_ : int = original_samples + noise * sigma
return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""image_processor""", """tokenizer"""]
a_ : Any = """FlavaImageProcessor"""
a_ : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Any , a_ : List[str]=None , a_ : Dict=None , **a_ : Tuple ):
lowerCAmelCase_ : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : List[Any] = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
lowerCAmelCase_ : Optional[int] = self.image_processor
def __call__( self : str , a_ : Optional[ImageInput] = None , a_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = False , a_ : Optional[int] = None , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : Optional[Any] , ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCAmelCase_ : Any = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
if images is not None:
lowerCAmelCase_ : str = self.image_processor(
a_ , return_image_mask=a_ , return_codebook_pixels=a_ , return_tensors=a_ , **a_ , )
if text is not None and images is not None:
encoding.update(a_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def lowerCamelCase ( self : Optional[int] , *a_ : Optional[Any] , **a_ : Dict ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Tuple , *a_ : Optional[int] , **a_ : Optional[int] ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : str = self.tokenizer.model_input_names
lowerCAmelCase_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
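
# Usage sketch (added): "facebook/flava-full" is the public FLAVA checkpoint;
# `image` stands in for any PIL image.
#
#     from transformers import FlavaProcessor
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")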
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
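
    # A few spot checks of the pattern (added for illustration): the prefix may
    # be 0, 94, +94 or 0094; the digit after the leading 7 excludes 3 and 9; and
    # exactly seven digits must follow.
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718639535")
    assert not is_sri_lankan_phone_number("0773283048123")  # too many digits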
"""Project Euler problem 12: find the first triangle number with over 500 divisors."""


def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
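
    # Sanity check (added): count_divisors computes d(n) = prod(e_i + 1) over the
    # prime factorization, e.g. 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
    assert count_divisors(28) == 6  # 1, 2, 4, 7, 14, 28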
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
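
# Editor's note (added): _LazyModule defers the heavy torch/vision imports until
# a symbol is first accessed, so the following import stays cheap:
#
#     from transformers.models.bridgetower import BridgeTowerConfig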
import importlib
import os

import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry

from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem

from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowercase__( *__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Union[Dict, Any]] = None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=2 ):
from .. import __version__
lowercase_ : Optional[Any] = take_from
lowercase_ : int = ()
if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
lowercase_ : List[Any] = None
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),)
lowercase_ : List[str] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),)
lowercase_ : Optional[Any] = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
lowercase_ : Optional[Any] = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
lowercase_ : List[Any] = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase_ : str = inspect.getouterframes(inspect.currentframe() )[1]
lowercase_ : Optional[int] = call_frame.filename
lowercase_ : Dict = call_frame.lineno
lowercase_ : Dict = call_frame.function
lowercase_ , lowercase_ : Optional[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return
elif len(__SCREAMING_SNAKE_CASE ) == 1:
return values[0]
return values
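
# Usage sketch (added): migrating a deprecated keyword argument. `scale` and
# `factor` are invented example names, not arguments from the library.
#
#     def resize(image, factor=1.0, **kwargs):
#         scale = deprecate("scale", "2.0.0", "Use `factor` instead.", take_from=kwargs)
#         factor = scale if scale is not None else factor
#         ...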
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class UpperCamelCase ( lowercase_ ):
lowercase = 'switch_transformers'
lowercase = ['past_key_values']
lowercase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self ,__UpperCamelCase=3_2128 ,__UpperCamelCase=768 ,__UpperCamelCase=64 ,__UpperCamelCase=2048 ,__UpperCamelCase=64 ,__UpperCamelCase=12 ,__UpperCamelCase=3 ,__UpperCamelCase=12 ,__UpperCamelCase=3 ,__UpperCamelCase=12 ,__UpperCamelCase=8 ,__UpperCamelCase=False ,__UpperCamelCase=0.01 ,__UpperCamelCase="float32" ,__UpperCamelCase=False ,__UpperCamelCase=32 ,__UpperCamelCase=128 ,__UpperCamelCase=0.1 ,__UpperCamelCase=1e-6 ,__UpperCamelCase=0.001 ,__UpperCamelCase=0.001 ,__UpperCamelCase=1.0 ,__UpperCamelCase="relu" ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=0 ,__UpperCamelCase=1 ,**__UpperCamelCase ,) -> str:
'''simple docstring'''
lowercase_ : List[str] = vocab_size
lowercase_ : Optional[Any] = d_model
lowercase_ : Dict = d_kv
lowercase_ : Dict = d_ff
lowercase_ : str = num_sparse_encoder_layers
lowercase_ : List[str] = num_layers
lowercase_ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase_ : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowercase_ : Tuple = self.num_layers // self.num_sparse_encoder_layers
else:
lowercase_ : Union[str, Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowercase_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowercase_ : List[Any] = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowercase_ : List[Any] = num_heads
lowercase_ : Dict = num_experts
lowercase_ : List[str] = expert_capacity
lowercase_ : Optional[Any] = router_bias
lowercase_ : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase_ : Optional[Any] = router_dtype
lowercase_ : Union[str, Any] = router_ignore_padding_tokens
lowercase_ : Any = relative_attention_num_buckets
lowercase_ : List[Any] = relative_attention_max_distance
lowercase_ : str = dropout_rate
lowercase_ : Any = layer_norm_epsilon
lowercase_ : Tuple = initializer_factor
lowercase_ : str = feed_forward_proj
lowercase_ : List[Any] = use_cache
lowercase_ : str = add_router_probs
lowercase_ : Tuple = router_z_loss_coef
lowercase_ : int = router_aux_loss_coef
lowercase_ : List[str] = self.feed_forward_proj.split('-' )
lowercase_ : List[str] = act_info[-1]
lowercase_ : Optional[Any] = act_info[0] == 'gated'
if len(__UpperCamelCase ) > 1 and act_info[0] != "gated" or len(__UpperCamelCase ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowercase_ : Union[str, Any] = 'gelu_new'
super().__init__(
pad_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,is_encoder_decoder=__UpperCamelCase ,**__UpperCamelCase ,)
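
# Usage sketch (added): with the defaults above, a sparse MoE layer replaces
# every `encoder_sparse_step`-th dense layer, i.e. 12 layers / 3 sparse = 4.
#
#     config = SwitchTransformersConfig()
#     assert (config.num_layers, config.num_sparse_encoder_layers, config.encoder_sparse_step) == (12, 3, 4)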
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
lowerCamelCase__ : Any = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 12
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
A : int = TypeVar("""T""")
class lowerCAmelCase_ ( Generic[T] ):
def __init__( self : int, _snake_case : bool = True ):
'''simple docstring'''
snake_case : dict[T, list[T]] ={} # dictionary of lists
snake_case : Optional[int] =directed
def __snake_case ( self : Any, _snake_case : T, _snake_case : T ):
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
self.adj_list[destination_vertex].append(_snake_case )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
snake_case : Any =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_snake_case )
snake_case : int =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
snake_case : Union[str, Any] =[destination_vertex]
snake_case : str =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
snake_case : Optional[Any] =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
snake_case : Any =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
snake_case : int =[destination_vertex]
snake_case : Optional[Any] =[]
return self
def __repr__( self : int ):
'''simple docstring'''
return pformat(self.adj_list )
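# A self-contained sketch of the adjacency-list idea above, with illustrative
# names (GraphAdjacencyList, add_edge) in place of the obfuscated ones.
class GraphAdjacencyList:
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict = {}  # vertex -> list of adjacent vertices
        self.directed = directed

    def add_edge(self, src, dst) -> "GraphAdjacencyList":
        self.adj_list.setdefault(src, []).append(dst)
        self.adj_list.setdefault(dst, [])  # make sure the key exists
        if not self.directed:
            self.adj_list[dst].append(src)
        return self

# Example: an undirected triangle.
print(GraphAdjacencyList(directed=False).add_edge(1, 2).add_edge(2, 3).add_edge(3, 1).adj_list)
# {1: [2, 3], 2: [1, 3], 3: [2, 1]}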
| 349
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def _lowerCAmelCase ( ) -> Generator[int, None, None]:
'''simple docstring'''
_UpperCamelCase :dict[int, int] ={}
_UpperCamelCase :Dict =2
while True:
_UpperCamelCase :Optional[Any] =factor_map.pop(_lowercase , _lowercase )
if factor:
_UpperCamelCase :Tuple =factor + prime
while x in factor_map:
x += factor
_UpperCamelCase :Optional[Any] =factor
else:
_UpperCamelCase :int =prime
yield prime
prime += 1
def _lowerCAmelCase ( __a = 1e1_0 ) -> int:
'''simple docstring'''
_UpperCamelCase :Tuple =sieve()
_UpperCamelCase :str =1
while True:
_UpperCamelCase :Optional[int] =next(_lowercase )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(_lowercase )
n += 2
if __name__ == "__main__":
print(solution())
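# A self-contained sketch of the incremental sieve above, with illustrative
# names: each known prime keeps a marker at its next unclaimed multiple, so a
# candidate absent from the map must itself be prime.
def primes():
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:  # candidate is composite; slide this prime's marker forward
            x = candidate + factor
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:  # candidate is a new prime; its first useful multiple is p*p
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

gen = primes()
print([next(gen) for _ in range(8)])  # [2, 3, 5, 7, 11, 13, 17, 19]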
| 709
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=0.9 , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :List[str] =size if size is not None else {"""shortest_edge""": 30}
_UpperCamelCase :str =crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
_UpperCamelCase :Tuple =parent
_UpperCamelCase :Optional[int] =batch_size
_UpperCamelCase :Tuple =num_channels
_UpperCamelCase :int =min_resolution
_UpperCamelCase :Union[str, Any] =max_resolution
_UpperCamelCase :Tuple =do_resize_and_center_crop
_UpperCamelCase :Union[str, Any] =size
_UpperCamelCase :Union[str, Any] =crop_pct
_UpperCamelCase :Tuple =crop_size
_UpperCamelCase :List[str] =do_normalize
_UpperCamelCase :Any =image_mean
_UpperCamelCase :Optional[Any] =image_std
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase__ ( __snake_case , unittest.TestCase ):
__UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Dict =PoolFormerImageProcessingTester(self )
@property
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_UpperCamelCase :Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase :List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase :int =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Optional[Any] =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase :int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase :List[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Tuple =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase :Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase :Dict =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase :Tuple =image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
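# A hedged sketch of the resize-then-crop recipe these tests exercise: if the
# processor follows the usual crop_pct convention, the short side is first
# resized to size/crop_pct and the result is center-cropped to `crop_size`.
# (Standalone arithmetic only; not a call into the real image processor.)
shortest_edge, crop_pct = 30, 0.9
resize_target = int(shortest_edge / crop_pct)
print(resize_target)  # 33 -> resize short side to 33, then center-crop 30x30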
| 512
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( _lowerCamelCase: list[float] ) -> bool:
'''simple docstring'''
if len(_lowerCamelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
__lowerCamelCase : Dict = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
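# A self-contained sketch of the check above, with illustrative names: a set
# of side lengths can close into a polygon iff the longest side is strictly
# shorter than the sum of all the others.
def can_form_polygon(sides: list[float]) -> bool:
    if len(sides) < 2:
        raise ValueError("Monogons and digons are not polygons in the Euclidean space")
    if any(s <= 0 for s in sides):
        raise ValueError("All values must be greater than 0")
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

print(can_form_polygon([6, 10, 5]))     # True  (10 < 6 + 5)
print(can_form_polygon([3, 7, 13, 2]))  # False (13 >= 3 + 7 + 2)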
| 646
|
"""simple docstring"""
def lowercase_ ( _lowerCamelCase: float , _lowerCamelCase: float ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.25) = }""")
print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
| 646
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
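# A hedged, self-contained sketch of the lazy-module pattern used above:
# attribute access triggers the real import, so importing the package stays
# cheap until a symbol is first used. `math`/`json` stand in for submodules.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol -> module that actually defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._symbol_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

lazy = LazyModuleSketch("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # 3.0 -- `math` is imported only at this point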
| 721
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                ' Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
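# A small sketch of the `feed_forward_proj` parsing done in the constructor
# above: the string is split on '-' to recover the activation name and whether
# the feed-forward block is gated. Standalone re-implementation, illustrative
# names only.
def parse_feed_forward_proj(value: str) -> tuple[str, bool]:
    parts = value.split("-")
    act_fn = parts[-1]
    is_gated = parts[0] == "gated"
    if (len(parts) > 1 and parts[0] != "gated") or len(parts) > 2:
        raise ValueError(f"{value!r} is not a valid feed_forward_proj")
    if value == "gated-gelu":
        act_fn = "gelu_new"  # the config maps gated-gelu onto gelu_new
    return act_fn, is_gated

print(parse_feed_forward_proj("gated-gelu"))  # ('gelu_new', True)
print(parse_feed_forward_proj("relu"))        # ('relu', False)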
| 688
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : int=100 , lowerCAmelCase : List[Any]=13 , lowerCAmelCase : Dict=30 , lowerCAmelCase : Any=2 , lowerCAmelCase : int=3 , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Optional[int]=32 , lowerCAmelCase : str=5 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Tuple=37 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Union[str, Any]=10 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Tuple=3 , )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = vocab_size
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def a__( self : Any , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase = FlaxBeitModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = FlaxBeitForMaskedImageModeling(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def a__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = FlaxBeitForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = FlaxBeitForImageClassification(lowerCAmelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(lowerCAmelCase )
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Any = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def a__( self : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxBeitModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def a__( self : int )-> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__( self : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def a__( self : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCAmelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCAmelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def a__( self : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
UpperCAmelCase = np.ones((1, 196) , dtype=lowerCAmelCase )
# forward pass
UpperCAmelCase = model(pixel_values=lowerCAmelCase , bool_masked_pos=lowerCAmelCase )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = (1, 196, 8192)
self.assertEqual(logits.shape , lowerCAmelCase )
UpperCAmelCase = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , lowerCAmelCase , atol=1E-2 ) )
@slow
def a__( self : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
# forward pass
UpperCAmelCase = model(**lowerCAmelCase )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(logits.shape , lowerCAmelCase )
UpperCAmelCase = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
UpperCAmelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCAmelCase )
@slow
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
# forward pass
UpperCAmelCase = model(**lowerCAmelCase )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = (1, 21841)
self.assertEqual(logits.shape , lowerCAmelCase )
UpperCAmelCase = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
UpperCAmelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , lowerCAmelCase )
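# A minimal sketch of the JIT-vs-eager equivalence check used above, on a toy
# function rather than a Flax model; `jax.disable_jit()` forces op-by-op
# execution so the two results can be compared.
import jax
import jax.numpy as jnp

@jax.jit
def double_plus_one_sum(x):
    return (x * 2 + 1).sum()

x = jnp.ones((3, 3))
with jax.disable_jit():
    eager = double_plus_one_sum(x)  # runs eagerly
jitted = double_plus_one_sum(x)     # compiled with XLA
assert jitted.shape == eager.shape and bool(jitted == eager)
print(jitted)  # 27.0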
| 210
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : List[Any] = MobileBertTokenizer
__magic_name__ : str = MobileBertTokenizerFast
__magic_name__ : Optional[int] = True
__magic_name__ : List[Any] = True
__magic_name__ : Dict = filter_non_english
__magic_name__ : str = "google/mobilebert-uncased"
def a__( self : Dict )-> Any:
"""simple docstring"""
super().setUp()
UpperCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCAmelCase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def a__( self : Any , lowerCAmelCase : Tuple )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''UNwant\u00E9d,running'''
UpperCAmelCase = '''unwanted, running'''
return input_text, output_text
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer_class(self.vocab_file )
UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__( self : str )-> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''UNwant\u00E9d,running'''
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# With lower casing
UpperCAmelCase = self.get_tokenizer(do_lower_case=lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase )
UpperCAmelCase = '''UNwant\u00E9d,running'''
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def a__( self : Dict )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def a__( self : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a__( self : Tuple )-> int:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__( self : str )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def a__( self : Any )-> int:
"""simple docstring"""
UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCAmelCase = {}
for i, token in enumerate(lowerCAmelCase ):
UpperCAmelCase = i
UpperCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def a__( self : Optional[int] )-> int:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__( self : Any )-> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase = tokenizer_r.encode_plus(
lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase , )
UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase , '''do_lower_case''' ) else False
UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = ['''的''', '''人''', '''有''']
UpperCAmelCase = ''''''.join(lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = False
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase )
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
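# A hedged sketch of the greedy longest-match-first WordPiece step the tests
# above exercise: split a word into the longest vocabulary pieces, prefixing
# continuations with '##', and fall back to [UNK] when any position fails.
# (Illustrative names; not the real tokenizer implementation.)
def wordpiece(word: str, vocab: set[str], unk: str = "[UNK]") -> list[str]:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no vocabulary piece matched -> the whole word is unknown
            return [unk]
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", vocab))  # ['[UNK]']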
| 210
| 1
|
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
        # Check all 8 cells surrounding element (i, j)
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
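# A self-contained sketch of the island counter above, assuming the intent is
# the classic 8-connected flood fill over a boolean grid. Names here
# (count_islands, fill) are illustrative, not the mangled ones in this file.
def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def fill(i: int, j: int) -> None:
        if not (0 <= i < rows and 0 <= j < cols) or visited[i][j] or not grid[i][j]:
            return
        visited[i][j] = True
        for di in (-1, 0, 1):  # visit all 8 neighbours
            for dj in (-1, 0, 1):
                if di or dj:
                    fill(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                fill(i, j)
                count += 1
    return count

print(count_islands([[1, 1, 0],
                     [0, 0, 0],
                     [0, 0, 1]]))  # 2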
| 710
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=lowerCamelCase ):
a__ = ['''note_seq''']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 0
|
def _lowerCamelCase ( __A : str ) -> list:
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__A ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('doctest').testmod()
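# A self-contained variant of the helper above, with an illustrative name:
# each alphabetic position yields a copy of the string with that single
# character upper-cased.
def capitalize_each(txt: str) -> list[str]:
    return [txt[:i] + txt[i].upper() + txt[i + 1:] for i in range(len(txt)) if txt[i].isalpha()]

print(capitalize_each("a1b"))  # ['A1b', 'a1B']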
| 485
| 0
|
def lowerCAmelCase_ ( __UpperCAmelCase: float ) -> float:
if edge <= 0 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
        raise ValueError('''Length must be positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def lowerCAmelCase_ ( __UpperCAmelCase: float ) -> float:
if edge <= 0 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
        raise ValueError('''Length must be positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
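# A usage sketch with clean, illustrative names for the two formulas above.
# For a regular dodecahedron of unit edge, the surface area is about 20.6457
# and the volume about 7.6631.
def dodecahedron_surface_area(edge: float) -> float:
    return 3 * ((25 + 10 * (5 ** 0.5)) ** 0.5) * edge**2

def dodecahedron_volume(edge: float) -> float:
    return ((15 + 7 * (5 ** 0.5)) / 4) * edge**3

print(round(dodecahedron_surface_area(1), 4))  # 20.6457
print(round(dodecahedron_volume(1), 4))        # 7.6631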
| 369
|
from __future__ import annotations
from math import gcd
def lowerCAmelCase_ ( __UpperCAmelCase: int , __UpperCAmelCase: int = 2 , __UpperCAmelCase: int = 1 , __UpperCAmelCase: int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(__UpperCAmelCase: int , __UpperCAmelCase: int , __UpperCAmelCase: int ) -> int:
return (pow(__UpperCAmelCase , 2 ) + step) % modulus
for _ in range(__UpperCAmelCase ):
# These track the position within the cycle detection logic.
UpperCamelCase__ : List[Any] = seed
UpperCamelCase__ : List[str] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
UpperCamelCase__ : Optional[int] = rand_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : List[str] = rand_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : Optional[int] = rand_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
UpperCamelCase__ : Optional[int] = gcd(hare - tortoise , __UpperCAmelCase )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
UpperCamelCase__ : List[str] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
UpperCAmelCase_ = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
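# A compact, self-contained sketch of the tortoise-and-hare loop above, with
# illustrative names: on failure it re-seeds from the hare and bumps the step,
# exactly as described in the comments. Assumes num >= 2; returns None when
# all attempts fail (e.g. for primes).
from math import gcd

def rho_factor(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    if num % 2 == 0:
        return 2
    for _ in range(attempts):
        tortoise = hare = seed
        while True:
            tortoise = (tortoise * tortoise + step) % num  # one step
            hare = (hare * hare + step) % num              # two steps
            hare = (hare * hare + step) % num
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                continue
            if divisor != num:
                return divisor                             # nontrivial factor
            break                                          # cycle hit num itself
        seed, step = hare, step + 1                        # re-seed and retry
    return None

print(rho_factor(15))  # 3
print(rho_factor(77))  # 7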
| 369
| 1
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :List[str] = checkpoint
snake_case_ :Union[str, Any] = {}
snake_case_ :List[str] = vae_state_dict["encoder.conv_in.weight"]
snake_case_ :List[Any] = vae_state_dict["encoder.conv_in.bias"]
snake_case_ :Union[str, Any] = vae_state_dict["encoder.conv_out.weight"]
snake_case_ :List[str] = vae_state_dict["encoder.conv_out.bias"]
snake_case_ :Optional[Any] = vae_state_dict["encoder.norm_out.weight"]
snake_case_ :Optional[Any] = vae_state_dict["encoder.norm_out.bias"]
snake_case_ :Optional[int] = vae_state_dict["decoder.conv_in.weight"]
snake_case_ :Optional[Any] = vae_state_dict["decoder.conv_in.bias"]
snake_case_ :Dict = vae_state_dict["decoder.conv_out.weight"]
snake_case_ :List[str] = vae_state_dict["decoder.conv_out.bias"]
snake_case_ :List[str] = vae_state_dict["decoder.norm_out.weight"]
snake_case_ :int = vae_state_dict["decoder.norm_out.bias"]
snake_case_ :Optional[Any] = vae_state_dict["quant_conv.weight"]
snake_case_ :str = vae_state_dict["quant_conv.bias"]
snake_case_ :Tuple = vae_state_dict["post_quant_conv.weight"]
snake_case_ :Dict = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
snake_case_ :Optional[int] = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
snake_case_ :Any = {
layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_A )
}
# Retrieves the keys for the decoder up blocks only
snake_case_ :str = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
snake_case_ :Optional[Any] = {
layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_A )
}
for i in range(_A ):
snake_case_ :str = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
snake_case_ :List[Any] = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.weight''' )
snake_case_ :Optional[int] = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.bias''' )
snake_case_ :List[str] = renew_vae_resnet_paths(_A )
snake_case_ :Any = {"old": F'''down.{i}.block''', "new": F'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(_A, _A, _A, additional_replacements=[meta_path], config=_A )
snake_case_ :str = [key for key in vae_state_dict if "encoder.mid.block" in key]
snake_case_ :List[Any] = 2
for i in range(1, num_mid_res_blocks + 1 ):
snake_case_ :List[str] = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
snake_case_ :Any = renew_vae_resnet_paths(_A )
snake_case_ :Union[str, Any] = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(_A, _A, _A, additional_replacements=[meta_path], config=_A )
snake_case_ :List[Any] = [key for key in vae_state_dict if "encoder.mid.attn" in key]
snake_case_ :Tuple = renew_vae_attention_paths(_A )
snake_case_ :List[str] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_A, _A, _A, additional_replacements=[meta_path], config=_A )
conv_attn_to_linear(_A )
for i in range(_A ):
snake_case_ :Union[str, Any] = num_up_blocks - 1 - i
snake_case_ :str = [
key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
]
if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
snake_case_ :List[str] = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.weight'''
]
snake_case_ :Optional[Any] = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.bias'''
]
snake_case_ :Optional[int] = renew_vae_resnet_paths(_A )
snake_case_ :List[str] = {"old": F'''up.{block_id}.block''', "new": F'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(_A, _A, _A, additional_replacements=[meta_path], config=_A )
snake_case_ :List[str] = [key for key in vae_state_dict if "decoder.mid.block" in key]
snake_case_ :List[Any] = 2
for i in range(1, num_mid_res_blocks + 1 ):
snake_case_ :Tuple = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
snake_case_ :Tuple = renew_vae_resnet_paths(_A )
snake_case_ :Any = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(_A, _A, _A, additional_replacements=[meta_path], config=_A )
snake_case_ :str = [key for key in vae_state_dict if "decoder.mid.attn" in key]
snake_case_ :Tuple = renew_vae_attention_paths(_A )
snake_case_ :int = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_A, _A, _A, additional_replacements=[meta_path], config=_A )
conv_attn_to_linear(_A )
return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to save the converted VAE to.')
__UpperCAmelCase : Optional[int] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
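
# Example (added for illustration; a minimal sketch with hypothetical paths):
# the converter can also be called programmatically, which is exactly what the
# CLI entry point above does.
#
#     vae_pt_to_vae_diffuser("/path/to/vae.pt", "/path/to/converted_vae")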
| 584
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    upper_limit = n + 1  # maximum limit
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
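
# Worked example (added for illustration): for n = 5 the generated powers are
# 4, 8, 16, 32 (a=2); 9, 27, 81, 243 (a=3); 16, 64, 256, 1024 (a=4); and
# 25, 125, 625, 3125 (a=5). Since 16 occurs twice, there are 15 distinct terms.
#
#     >>> solution(5)
#     15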
| 584
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
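
# Example (added for illustration; a hedged sketch with hypothetical token ids and
# dtypes): once exported, the graph can be run without transformers at all.
#
#     import numpy as np
#     import onnxruntime
#
#     sess = onnxruntime.InferenceSession("BART.onnx")
#     (output_ids,) = sess.run(
#         None,
#         {
#             "input_ids": np.array([[0, 713, 16, 10, 1296, 2]], dtype=np.int64),
#             "attention_mask": np.ones((1, 6), dtype=np.int64),
#             "num_beams": np.array(4, dtype=np.int64),
#             "max_length": np.array(5, dtype=np.int64),
#             "decoder_start_token_id": np.array(2, dtype=np.int64),
#         },
#     )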
| 700
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
A__ : Optional[Any] = parser.parse_args()
A__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
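
# Example (added for illustration; paths are hypothetical): a converted checkpoint
# saved under `levit-dump-folder/levit-128S` can be reloaded directly.
#
#     from transformers import LevitForImageClassificationWithTeacher, LevitImageProcessor
#
#     model = LevitForImageClassificationWithTeacher.from_pretrained("levit-dump-folder/levit-128S")
#     processor = LevitImageProcessor.from_pretrained("levit-dump-folder/levit-128S")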
| 272
| 0
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the pair to the bucket at index `ind`.

        Returns True if the bucket was empty or already held the same key;
        returns False if the bucket holds a different key.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """Return True if the buckets are filled past the capacity factor."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """Return True if the buckets are used at less than half the capacity factor."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
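

if __name__ == "__main__":
    # Example usage (added for illustration; a minimal sketch, not part of the
    # original data structure):
    hash_map: HashMap[str, int] = HashMap()
    hash_map["apple"] = 1
    hash_map["banana"] = 2
    assert hash_map["apple"] == 1 and len(hash_map) == 2
    del hash_map["apple"]
    assert list(hash_map) == ["banana"]
    print(hash_map)  # HashMap(banana: 2)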
| 651
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result["""eval_perplexity"""] , 1_00 )
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the target score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
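
# Example (added for illustration; the file name is hypothetical): a single test
# from this module can be selected with pytest's -k filter, e.g.
#
#     python -m pytest -k test_run_glue examples/flax/test_flax_examples.py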
| 651
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
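
# Example (added for illustration; a hedged sketch): with the `_LazyModule`
# pattern above, importing the package is cheap -- a heavy submodule such as
# `modeling_convnext` is only imported when one of its attributes is first
# accessed.
#
#     from transformers.models import convnext
#     config = convnext.ConvNextConfig()  # triggers the real import lazily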
| 709
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact by following the redirect to the real download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
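
# Example (added for illustration; toy data): `reduce_by_error` also works on
# hand-built logs, where each entry is [error_line, error, failed_test, job_link].
#
#     logs = [
#         ["test_a.py:1", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_fwd", None],
#         ["test_b.py:2", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_fwd", None],
#     ]
#     reduce_by_error(logs)["AssertionError"]["count"]  # -> 2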
| 498
| 0
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Create a state-space tree and traverse it with DFS. At each element, the
    branch that excludes the element is explored first, then the branch that
    includes it.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
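
    # Expected output (added for illustration): for ["A", "B", "C"] the branch
    # that skips an element is explored first, so the first printed subsequence
    # is [] and the last one is ["A", "B", "C"].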
| 165
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A )
SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity()
def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.projection(__A )
SCREAMING_SNAKE_CASE__ = self.norm(__A )
return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict:
"""simple docstring"""
super().__init__(1 , __A , **__A )
class PoolFormerPooling(nn.Module):
def __init__( self :List[str] , __A :Optional[int] ) -> Any:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.pool(__A ) - hidden_states
class PoolFormerOutput(nn.Module):
def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , __A :Any ) -> str:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A )
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.conva(__A )
SCREAMING_SNAKE_CASE__ = self.act_fn(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
SCREAMING_SNAKE_CASE__ = self.conva(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
return hidden_states
class PoolFormerLayer(nn.Module):
def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
# Useful for training neural nets
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE__ = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str:
"""simple docstring"""
if self.use_layer_scale:
SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = ()
SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states
SCREAMING_SNAKE_CASE__ = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) )
SCREAMING_SNAKE_CASE__ = hidden_states + layer_output
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
class PoolFormerEncoder(nn.Module):
def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE__ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
# Transformer blocks
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE__ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None
SCREAMING_SNAKE_CASE__ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE__ = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = blk(__A )
SCREAMING_SNAKE_CASE__ = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict:
"""simple docstring"""
if isinstance(__A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any:
"""simple docstring"""
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = value
_lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class PoolFormerModel(PoolFormerPreTrainedModel):
def __init__( self :Union[str, Any] , __A :Any ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
SCREAMING_SNAKE_CASE__ = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module):
def __init__( self :int , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self :List[Any] , __A :Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
def __init__( self :str , __A :Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config.num_labels
SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A )
# Final norm
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE__ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = outputs[0]
SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE__ = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE__ = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE__ = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
if not return_dict:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
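
# Example (added for illustration; a hedged sketch using the public classes in
# transformers, which mirror the structure above):
#
#     import torch
#     from transformers import PoolFormerConfig, PoolFormerForImageClassification
#
#     model = PoolFormerForImageClassification(PoolFormerConfig())
#     logits = model(torch.randn(1, 3, 224, 224)).logits  # shape: (1, num_labels)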
| 6
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
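
# Example (added for illustration): a minimal configuration for a 24-step
# forecast horizon. `context_length` falls back to `prediction_length`, and
# `feature_size` is derived as input_size * len(lags_sequence) + _number_of_features.
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     assert config.context_length == 24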
| 461
|
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """
    Solve the "rat in a maze" problem: return True if a path exists from the
    top-left to the bottom-right corner, printing the solution if one is found.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """
    Recursive helper: starting from position (i, j), try every adjacent cell.
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
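
# Worked example (added for illustration): a 2x2 maze where 1 marks a blocked
# cell; the only path goes down and then right.
#
#     >>> solve_maze([[0, 1], [0, 0]])
#     [1, 0]
#     [1, 1]
#     True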
| 461
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
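# Usage sketch: thanks to the _LazyModule registered above, an import such as
#     from transformers import RemBertModel
# only loads the torch-dependent module on first attribute access, not at package import time.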
| 182
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 182
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to ``num`` using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
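# Quick check of the sieve above: prime_sieve(10) returns [2, 3, 5, 7].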
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 555
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """Convolution + batch norm + ReLU block used throughout the UperNet heads."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
            padding=padding, bias=bias, dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools the deepest feature map at several scales."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
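# Shape sketch (hypothetical sizes): with pool_scales=(1, 2, 3, 6), channels=512 and a
# (B, 2048, 64, 64) input, each block adaptively pools to (B, 2048, s, s), projects to
# (B, 512, s, s) via the 1x1 UperNetConvModule, and is upsampled back to (B, 512, 64, 64).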
class UperNetHead(nn.Module):
    """UperNet decode head: Pyramid Pooling Module on the deepest feature plus an FPN over all stages."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
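# Fusion sketch (hypothetical sizes): with four backbone stages and channels=512, the forward
# pass above upsamples every FPN level to the finest resolution and concatenates them into
# (B, 4 * 512, H, W) before fpn_bottleneck and the final 1x1 classifier.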
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head, applied to one intermediate backbone feature map."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
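# Usage sketch (assumes the checkpoint listed above is available locally or on the Hub):
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     logits = model(pixel_values=pixel_values).logits  # (batch, num_labels, height, width)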
| 65
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 1_069, 11]]),
torch.tensor([[0, 0, 0, 1_069, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
| 87
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
__UpperCamelCase ={'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 712
|
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # convert a comma-separated string into a list of (string) numbers
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
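# Example sketch: SubArray("1,2,3,-2,5").solve_sub_array() returns 9 -- sum_value[i] holds the
# best sum of a subarray ending at i (Kadane's recurrence) and rear[i] carries the running maximum.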
if __name__ == "__main__":
__lowercase = input('''please input some numbers:''')
__lowercase = SubArray(whole_array)
__lowercase = array.solve_sub_array()
print(('''the results is:''', re))
| 296
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
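# e.g. get_deta_config("deta-swin-large-o365") yields a config with the 366 Object365 labels,
# while any other model name falls back to the 91 COCO detection labels.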
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    # Move the value stored under `old` to the key `new`.
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]


# We verify the conversion on an image of two cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
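
# Illustration only (not part of the original conversion script; assumes `torch` is
# already imported above): how a fused qkv projection matrix is sliced into
# query/key/value blocks, mirroring read_in_swin_q_k_v / read_in_decoder_q_k_v.
def _demo_split_fused_qkv(dim=4):
    fused_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused_weight[:dim, :]  # first `dim` rows
    key = fused_weight[dim : dim * 2, :]  # middle `dim` rows
    value = fused_weight[-dim:, :]  # last `dim` rows
    # stacking the three slices back together recovers the fused matrix
    assert torch.equal(torch.cat([query, key, value]), fused_weight)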
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
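
# Example invocation (a sketch; the script filename is an assumption here):
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path /tmp/deta-swin-large \
#       --push_to_hub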
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__( self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1_000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
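
# Hedged usage sketch (not part of the test suite): the same checkpoint exercised
# by the integration test above can be queried through the high-level pipeline API.
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="MBZUAI/swiftformer-xs")
#     print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])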
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression using two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ")", pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            # num2 was pushed first, so it is the left operand
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
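
# Minimal usage sketch (illustration only, grounded in the classmethods defined above):
#     text_config = BlipTextConfig()
#     vision_config = BlipVisionConfig()
#     config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#     # the composed config wires the vision hidden size into the text cross-attention
#     assert config.text_config.encoder_hidden_size == vision_config.hidden_size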
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
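
# For reference, the same ROUGE-2 computation with the `rouge_score` package
# directly (a sketch; the repo-local `calculate_rouge` helper above is assumed
# to wrap this scorer):
#     from rouge_score import rouge_scorer
#     scorer = rouge_scorer.RougeScorer(["rouge2"], use_stemmer=True)
#     print(scorer.score(TGT[0], PRED[0])["rouge2"].fmeasure)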
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
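
# How the lazy registration above behaves from the user side (a sketch):
#     import transformers.models.swin as swin  # cheap: torch is not imported yet
#     model_cls = swin.SwinModel               # attribute access triggers loading modeling_swin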
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
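
# Usage sketch (assuming this module is the package's `utils` __init__):
#     from datasets.utils import disable_progress_bar, is_progress_bar_enabled
#     disable_progress_bar()
#     assert not is_progress_bar_enabled()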
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the parameter name of the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
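
# Hedged usage sketch (the artifact name and paths are hypothetical; requires a
# GitHub token able to read Actions artifacts):
#     import os
#     reports = get_last_daily_ci_reports(
#         artifact_names=["ci_results"],
#         output_dir="/tmp/daily_ci",
#         token=os.environ.get("GITHUB_TOKEN"),
#     )
#     for artifact_name, files in reports.items():
#         print(artifact_name, sorted(files))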
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__magic_name__ = "src/transformers"
# Matches is_xxx_available()
__magic_name__ = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__magic_name__ = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__magic_name__ = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__magic_name__ = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__magic_name__ = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__magic_name__ = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__magic_name__ = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__magic_name__ = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__magic_name__ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__magic_name__ = re.compile(R"^\s*try:")
# Catches a line with else:
__magic_name__ = re.compile(R"^\s*else:")
def _lowerCAmelCase ( UpperCamelCase_ ):
if _re_test_backend.search(UpperCamelCase_ ) is None:
return None
__SCREAMING_SNAKE_CASE = [b[0] for b in _re_backend.findall(UpperCamelCase_ )]
backends.sort()
return "_and_".join(UpperCamelCase_ )
def parse_init(init_file):
    """Parse (per backend) the `_import_structure` objects and the `TYPE_CHECKING` objects defined in an init."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo and collect an error per init whose two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
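
# Typical invocation from the repository root (the script takes no arguments):
#     python utils/check_inits.py
# On a mismatch, the error starts with the message built in check_all_inits(), e.g.:
#     "Problem in src/transformers/models/<name>/__init__.py, both halves do not define the same objects."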
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
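
# For reference, an equivalent corpus-level BLEU with sacrebleu directly
# (a sketch; the repo-local `calculate_bleu` helper above is assumed to wrap it):
#     import sacrebleu
#     bleu = sacrebleu.corpus_bleu(decoded_sentences, [tgt_sentences])
#     print(round(bleu.score, 2))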
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : Dict=7 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=99 , __UpperCamelCase : List[Any]=36 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : str=4 , __UpperCamelCase : List[Any]=37 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : int=512 , __UpperCamelCase : List[Any]=16 , __UpperCamelCase : int=2 , __UpperCamelCase : Dict=0.02 , __UpperCamelCase : Optional[int]=6 , __UpperCamelCase : str=6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Any=None , __UpperCamelCase : str=1_000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_UpperCAmelCase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = tmp_coordinate
_UpperCAmelCase = tf.constant(__UpperCamelCase )
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
# Note: the mixin base class names below are assumed from the standard
# transformers TF test layout; they should match this file's imports.
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    # The TF_MODEL_FOR_*_MAPPING constants below are assumed to come from this
    # file's imports, as in the standard transformers TF test setup.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 129
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__lowerCAmelCase = "\\n\n"
__lowerCAmelCase = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__lowerCAmelCase = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 129
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 266
|
def actual_power(a: int, b: int) -> int:
    """Compute a**b recursively by squaring, for a non-negative exponent b."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b, handling negative exponents via the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
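# Illustrative expected results for the functions above:
#   power(2, 10)  -> 1024
#   power(-2, -3) -> -0.125  (negative exponents return a float reciprocal)
# Sharing the half-power between the two recursive factors keeps the number of
# multiplications at O(log b) instead of O(b).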
| 266
| 1
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 707
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
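# Design note: _LazyModule defers the heavy torch/TF imports until an attribute
# is first accessed. A minimal sketch of the idea (illustrative only, not the
# actual transformers implementation):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)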
| 66
| 0
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()


# Function names below follow accelerate's benchmarking utilities
# (start_measure / end_measure / log_measures); the originals were obfuscated.
def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
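# A minimal usage sketch of the utilities above (assumes a CUDA-enabled torch
# build, like the functions themselves; the workload is a made-up example):
#
#     start = start_measure()
#     _ = [i**2 for i in range(1_000_000)]  # workload to profile
#     log_measures(end_measure(start), "squares benchmark")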
| 192
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
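# Worked example, traced by hand against the rules above (not output captured
# from the original script):
#   rename_key("conv_1.weight")                  -> "mobilevit.conv_stem.weight"
#   rename_key("conv_1.weight", base_model=True) -> "conv_stem.weight"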
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
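# The qkv split above assumes the original checkpoint stores one fused
# projection of shape (3 * dim, dim): rows [:dim] are the query weights,
# [dim : 2 * dim] the key weights and [-dim:] the value weights. Toy example:
#
#     fused = torch.arange(12).reshape(6, 2)  # dim = 2
#     q, k, v = fused[:2], fused[2:4], fused[4:]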
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1_000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 192
| 1
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 216
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and the duration (in minutes) for one job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
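# Example with made-up timestamps: started_at="2023-01-01T10:00:00Z" and
# completed_at="2023-01-01T10:07:00Z" yield {"duration": 7}, i.e. 420 seconds
# divided by 60 and rounded.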
def get_job_time(workflow_run_id, token=None):
    """Fetch per-job timing information for a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
| 216
| 1
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
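# Why the search can stop at 1_000_000: a number with d digits has a
# digit-power sum of at most d * 9**5 = d * 59_049. For d = 7 that bound is
# 413_343, which is smaller than the smallest 7-digit number (1_000_000), so
# no number with 7 or more digits can equal the sum of the fifth powers of
# its digits.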
| 596
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 512
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
def lowerCamelCase_ ( self ):
        d1 = {"""a""": 1, """b""": 2}
        d2 = {"""a""": 3, """b""": 4}
        d3 = {"""a""": 5, """b""": 6}
        expected_zip_dict_result = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
def lowerCamelCase_ ( self ):
        class Foo:
            """simple docstring"""
            my_attr = 'bar'
        foo = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(foo , """my_attr""" , """BAR""" ):
            self.assertEqual(foo.my_attr , """BAR""" )
        self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def UpperCamelCase__ ( iterable_length: int , num_proc: int , expected_num_proc: int ):
    '''simple docstring'''
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class UpperCamelCase_ ( TestCase ):
"""simple docstring"""
@require_tf
def lowerCamelCase_ ( self ):
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def lowerCamelCase_ ( self ):
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def lowerCamelCase_ ( self ):
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def UpperCamelCase__ ( input_data: Dict ):
    '''simple docstring'''
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def UpperCamelCase__ ( data: List[str] , expected_output: Union[str, Any] ):
    '''simple docstring'''
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def UpperCamelCase__ ( ):
    '''simple docstring'''
    input_obj = A(x=1 , y="""foobar""" )
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input_obj ) == expected_output
    input_obj = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(input_obj ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="""foo""" )] )
def _split_text ( text: str ):
    '''simple docstring'''
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content: List[Any] ):
    '''simple docstring'''
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def UpperCamelCase__ ( ):
    '''simple docstring'''
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
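# Added sketch (not part of the original tests): minimal map_nested usage for orientation.
# `add_one` is the picklable helper defined at the top of this file.
def _demo_map_nested() -> None:
    nested = {"""a""": [1, 2], """b""": {"""c""": 3}}
    assert map_nested(add_one , nested ) == {"""a""": [2, 3], """b""": {"""c""": 4}}
    assert map_nested(add_one , nested , num_proc=2 ) == {"""a""": [2, 3], """b""": {"""c""": 4}}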
| 701
|
from __future__ import annotations
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
__lowerCamelCase = data
__lowerCamelCase = None
__lowerCamelCase = None
def display ( tree: Node | None ): # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree ( tree: Node | None ):
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree ( tree: Node ):
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
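# Added example (not in the original script): a node with exactly one child makes the tree not full.
def _demo_not_full() -> None:
    lopsided = Node(1 )
    lopsided.left = Node(2 ) # right child deliberately missing
    print(is_full_binary_tree(lopsided ) ) # False
    print(depth_of_tree(lopsided ) ) # 2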
def main ( ): # Main function for testing.
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.left.right.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("""Tree is: """ )
    display(tree )
if __name__ == "__main__":
main()
| 571
| 0
|
def a__ ( word ):
    """simple docstring"""
    return "".join(chr(ord(char ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 74
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 29
| 0
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict (checkpoint_path ) -> Dict:
    '''simple docstring'''
    sd = torch.load(checkpoint_path ,map_location="cpu" )
    return sd
def get_new_dict (d ,config ,rename_keys_prefix=rename_keys_prefix ) -> Optional[Any]:
    '''simple docstring'''
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] ,name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint (checkpoint_path ,pytorch_dump_folder_path ):
    '''simple docstring'''
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict ,config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
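# Added usage note (not in the original): a hypothetical invocation; paths are placeholders and the
# first argument's file name must be one of ACCEPTABLE_CHECKPOINTS, e.g.
#   python convert_visual_bert_checkpoint.py checkpoints/vqa_fine_tuned.th ./visualbert-vqa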
| 717
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __UpperCAmelCase ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['audio_values', 'audio_mask']
    def __init__( self , spectrogram_length=20_48 , num_channels=1 , patch_size=[16, 16] , feature_size=1_28 , sampling_rate=44_100 , hop_length_to_sampling_rate=86 , n_fft=20_48 , padding_value=0.0 , **kwargs , )-> Optional[Any]:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T
    def _np_extract_fbank_features( self , waveform: np.array )-> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = True , sampling_rate: Optional[int] = None , resample: bool = False , mask_audio: bool = False , **kwargs , )-> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
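# Added usage sketch (not in the original file): exercise the extractor on one second of synthetic
# mono audio at the default 44.1 kHz rate; shapes assume the default feature_size of 128.
def _demo_feature_extraction() -> None:
    extractor = __UpperCAmelCase()
    waveform = np.random.randn(44_100 ).astype(np.float32 )
    batch = extractor(waveform , sampling_rate=44_100 , return_tensors="np" )
    print(batch["audio_values"].shape ) # (1, 1, padded_time, 128)
    print(batch["audio_mask"].shape )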
| 228
| 0
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
"emoji": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"""{line['duration']:.4f}"""
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__UpperCamelCase = "Too many failed tests, please see the full report in the Action results."
__UpperCamelCase = len(err) + 10
__UpperCamelCase = message[: 3000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
__UpperCamelCase = "No failed tests! 🤗"
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
    action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
    date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__UpperCamelCase = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__UpperCamelCase = row[0]
else:
__UpperCamelCase = ""
__UpperCamelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
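# Added illustration (not in the original script): the custom TableFormat above renders plain
# pipe-separated rows, e.g.
#   tabulate([["tests/test_hooks.py", 1]], headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right")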
| 26
|
"""simple docstring"""
__A : Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__A : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__A : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 499
| 0
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase : Any = "base_with_context"
def load_notes_encoder ( weights , model ):
"""simple docstring"""
A = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
A = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        A = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder ( weights , model ):
"""simple docstring"""
A = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
A = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight['attention']
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder ( weights , model ):
"""simple docstring"""
A = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
A = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
A = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'layers_{lyr_num}']
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main ( args ):
    """simple docstring"""
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 713
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def __snake_case ( file , sock ) -> Dict:
    """simple docstring"""
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
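# Added sketch (not in the original test): the same patching pattern in isolation — `patch` swaps
# the real socket class for a Mock inside the context, so nothing touches the network.
def _demo_patch_pattern() -> None:
    with patch('socket.socket' ) as mock_sock:
        import socket
        socket.socket() # returns mock_sock.return_value, a Mock
        mock_sock.assert_called_once()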
| 91
| 0
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __snake_case ):
    def __init__(self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
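# Added migration note (not in the original): since this shim only warns and delegates, new code
# can instantiate the replacement directly, e.g.
#   from transformers import PoolFormerImageProcessor
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")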
| 204
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['LayoutLMv2FeatureExtractor']
lowercase_ = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
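# Added illustration (not in the original module): a toy stand-in for the _LazyModule trick used
# above — attribute access triggers the real submodule import on demand.
class _TinyLazyModule:
    """Sketch only: resolves attributes to submodule members lazily."""
    def __init__(self , name , import_structure ):
        self._name = name
        self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self , attr ):
        import importlib
        module = importlib.import_module(f"{self._name}.{self._map[attr]}" )
        return getattr(module , attr )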
| 552
| 0
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text ( text: str , n: int=100 , character: str=" " )-> List[str]:
    """simple docstring"""
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents ( documents: dict )-> dict:
    """simple docstring"""
    titles, texts = [], []
    for title, text in zip(documents["""title"""] , documents["""text"""] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else """""" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed ( documents: dict , ctx_encoder: DPRContextEncoder , ctx_tokenizer: DPRContextEncoderTokenizerFast )-> dict:
    """simple docstring"""
    input_ids = ctx_tokenizer(
        documents["""title"""] , documents["""text"""] , truncation=True , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase ( UpperCAmelCase_ : "RagExampleArguments" , UpperCAmelCase_ : "ProcessingArguments" , UpperCAmelCase_ : "IndexHnswArguments" , )-> int:
"""simple docstring"""
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("""embeddings""" , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
    dataset.get_index("""embeddings""" ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
    csv_path: str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    question: Optional[str] = field(
        default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
    num_proc: Optional[int] = field(
        default=None , metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    batch_size: int = field(
        default=16 , metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
    d: int = field(
        default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    m: int = field(
        default=128 , metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
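# Added reload sketch (not in the original script): the saved artifacts can be reused later, e.g.
#   from datasets import load_from_disk
#   dataset = load_from_disk(os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"))
#   dataset.load_faiss_index("embeddings", os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss"))
#   scores, docs = dataset.get_nearest_examples("embeddings", question_embedding, k=5)  # question_embedding: a DPR question vector computed elsewhere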
| 321
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
'''simple docstring'''
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True # deprecated
    block_size: Optional[int] = None # deprecated
    chunksize: int = 10 << 20 # 10MB
    newlines_in_values: Optional[bool] = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self , pa_table ):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , """rb""" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F'''Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ): # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                                    raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                                raise ValueError(
                                    F'''Not able to read records in the JSON file at {file}. '''
                                    F'''You should probably indicate the field of the JSON file containing your records. '''
                                    F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
                                    F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
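# Added usage sketch (not in the original module): this builder is normally reached via load_dataset.
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")                 # one JSON object per line
#   ds = load_dataset("json", data_files="data.json", field="data", split="train")   # one object, records under "data"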
| 321
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
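# Added worked examples (not in the original), using the mass-action law n * p = ni**2:
#   lowerCAmelCase_(electron_conc=0, hole_conc=20, intrinsic_conc=10)  -> ('electron_conc', 5.0)
#   lowerCAmelCase_(electron_conc=25, hole_conc=100, intrinsic_conc=0) -> ('intrinsic_conc', 50.0)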
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp( self ) -> int:
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self, **kwargs ) -> Union[str, Any]:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_rust_tokenizer( self, **kwargs ) -> Dict:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ) -> Optional[Any]:
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text, add_prefix_space=True )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = '''lower newer'''
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens, rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False )
        self.assertListEqual(ids, rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence, add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids, rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def UpperCamelCase__ ( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
    def UpperCamelCase__ ( self, max_length=15 ) -> str:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''', )
    def test_padding_if_pad_token_set_slow( self ) -> Optional[int]:
        """simple docstring"""
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='''<pad>''' )
        # Simple input
        s = '''This is a simple input'''
        sa = ['''This is a simple input looooooooong''', '''This is a simple input''']
        p = ('''This is a simple input''', '''This is a pair''')
        pa = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='''max_length''', max_length=30, return_tensors='''np''' )
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors='''np''' )
        out_p = tokenizer(*p, padding='''max_length''', max_length=60, return_tensors='''np''' )
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def test_add_bos_token_slow( self ) -> int:
        """simple docstring"""
        bos_token = '''$$$'''
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True )
        s = '''This is a simple input'''
        sa = ['''This is a simple input 1''', '''This is a simple input 2''']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0], bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0], bos_token )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
    def test_truncation( self ) -> List[str]:
        """simple docstring"""
        tokenizer = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        text = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
        expected_truncated_text = '''\nif len_a > len_b: result = a\nelse: result = b'''
        input_ids = tokenizer.encode(text )
        truncate_before_pattern = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern )
        self.assertEqual(decoded_text, expected_truncated_text )
    def test_padding_different_model_input_name( self ) -> str:
"""simple docstring"""
pass
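
# Illustrative usage sketch (not part of the test suite), assuming the public
# `Salesforce/codegen-350M-mono` checkpoint is reachable: decoding with
# `truncate_before_pattern` cuts the decoded string at the first regex match,
# which is how CodeGen completions are usually trimmed at inference time.
#
#     tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# next function")
#     print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))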
| 253
| 1
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb" ) as fp:
            corpus = pickle.load(fp, encoding="latin1" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f'''Building PyTorch model from configuration: {config}''' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model, config, tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME )
        print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
        torch.save(model.state_dict(), pytorch_weights_dump_path )
        print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
        with open(pytorch_config_dump_path, "w", encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
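# Example invocation (illustrative; the script name and paths below are placeholders):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json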
| 708
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=str , default="""wikitext""" , help="""Name of the training dataset. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=str , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=str , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=int , default=1_000 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=str , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=None , type=int , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=str , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["""text"""] )
    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
return records
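# Hedged round-trip sketch (not in the original script): parsing one record
# written above back into tensors. `VarLenFeature` is assumed here so the spec
# stays length-agnostic even though grouped sequences are all `max_length` long.
def _parse_example_sketch(record_bytes):
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    return tf.io.parse_single_example(record_bytes, feature_spec)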
def main(args):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
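    # For example, with max_length=4 the tokenized rows [1, 2, 3] and
    # [4, 5, 6, 7, 8] are concatenated to [1, 2, 3, 4, 5, 6, 7, 8] and re-split
    # into [1, 2, 3, 4] and [5, 6, 7, 8]; a ninth token would be dropped.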
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("""Wrote file {} containing {} records""".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f'''split-{args.split}-records-count.txt''' , """w""" ) as f:
        print(f'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
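# Example invocation (illustrative; the defaults already target wikitext):
#   python prepare_tfrecord_shards.py --split train --shard_size 1000 --output_dir tf-tpu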
| 59
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    >>> bubble_sort([0, 5, 2, 3, 2], 5)
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
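
    # Quick demo of the recursive variant (plain arithmetic, no extra deps):
    print(bubble_sort([4, 2, 3, 1]))  # -> [1, 2, 3, 4]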
| 590
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self )->Any:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 224, '''width''': 224},
            '''do_center_crop''': True,
            '''crop_size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
            '''do_convert_rgb''': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs )->int:
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs )->Tuple:
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs )->str:
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self )->List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self )->List[str]:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self )->Optional[int]:
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self )->List[Any]:
        '''simple docstring'''
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self )->Any:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self )->List[Any]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self )->Dict:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self )->List[str]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self )->Optional[Any]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
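
# Illustrative usage sketch (outside the test suite), assuming the public
# `OFA-Sys/chinese-clip-vit-base-patch16` checkpoint is available:
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     batch = processor(text=["一件T恤"], images=image, return_tensors="pt", padding=True)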
| 590
| 1
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    '''simple docstring'''
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            'files, respectively' )
return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    '''simple docstring'''
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {f1 * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation(key_lines ):
    '''simple docstring'''
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute(
        self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ) -> List[Any]:
        """simple docstring"""
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 651
|
class Graph:
    '''Undirected weighted graph stored as adjacency dictionaries.'''
    def __init__( self ) -> Optional[int]:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> int:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> Tuple:
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> List[str]:
        # Make all edge weights pairwise distinct so the cheapest edge per
        # component is unambiguous (a precondition of Boruvka's algorithm).
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> Optional[int]:
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n' )
    def get_edges( self ) -> Union[str, Any]:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ) -> int:
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ) -> List[str]:
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind:
        '''Disjoint-set forest with union by rank and path compression.'''
        def __init__( self ) -> int:
            self.parent = {}
            self.rank = {}
        def __len__( self ) -> List[str]:
            return len(self.parent )
        def make_set( self , item ) -> Dict:
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ) -> Optional[int]:
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , item1 , item2 ) -> Union[str, Any]:
            root1 = self.find(item1 )
            root2 = self.find(item2 )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst( graph ) -> Tuple:
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
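
# Worked example (hand-checkable): for the triangle
#   add_edge(1, 2, 1); add_edge(2, 3, 2); add_edge(1, 3, 3)
# Boruvka keeps (1, 2) and (2, 3); the MST weight is 1 + 2 = 3.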
| 651
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ) -> Any:
    """simple docstring"""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ) -> Dict:
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ) -> str:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ) -> Tuple:
    """simple docstring"""
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
snake_case : str = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
snake_case : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
snake_case : List[str] = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
snake_case : Dict = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
snake_case : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"upernet-convnext-{size}" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
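# Example invocation (illustrative; the script name is a placeholder):
#   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub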
| 598
|
import operator as op
def solve(post_fix ) -> Any:
    """simple docstring"""
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
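    # e.g. entering "5 6 9 * +" prints the evaluation trace and yields 5 + (6 * 9) = 59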
| 598
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ) ->Any:
        if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ) ->str:
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
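
# Shape sketch (illustrative): with batch_size=2, num_choices=4 and a padded
# length of 128, the flattened features are padded as an (8, 128) batch and
# then viewed back to (2, 4, 128), so the model scores all four endings of
# each example in a single forward pass.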
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE__ : Any = data_args.validation_file
SCREAMING_SNAKE_CASE__ : int = data_args.train_file.split("." )[-1]
SCREAMING_SNAKE_CASE__ : str = load_dataset(
__snake_case , data_files=__snake_case , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
SCREAMING_SNAKE_CASE__ : Dict = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
SCREAMING_SNAKE_CASE__ : List[Any] = [f"""ending{i}""" for i in range(4 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = "sent1"
SCREAMING_SNAKE_CASE__ : Optional[int] = "sent2"
if data_args.max_seq_length is None:
SCREAMING_SNAKE_CASE__ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
SCREAMING_SNAKE_CASE__ : Tuple = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE__ : str = [[context] * 4 for context in examples[context_name]]
SCREAMING_SNAKE_CASE__ : Optional[int] = examples[question_header_name]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__snake_case )
]
# Flatten out
SCREAMING_SNAKE_CASE__ : Optional[int] = list(chain(*__snake_case ) )
SCREAMING_SNAKE_CASE__ : Tuple = list(chain(*__snake_case ) )
# Tokenize
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(
__snake_case , __snake_case , truncation=__snake_case , max_length=__snake_case , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__snake_case ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
SCREAMING_SNAKE_CASE__ : List[Any] = raw_datasets["train"]
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(len(__snake_case ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE__ : Dict = train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
SCREAMING_SNAKE_CASE__ : List[str] = train_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
SCREAMING_SNAKE_CASE__ : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE__ : Dict = min(len(__snake_case ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE__ : Tuple = eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
SCREAMING_SNAKE_CASE__ : List[Any] = eval_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
SCREAMING_SNAKE_CASE__ : Dict = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__snake_case , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowerCamelCase : str ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = eval_predictions
SCREAMING_SNAKE_CASE__ : Tuple = np.argmax(__snake_case , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = last_checkpoint
SCREAMING_SNAKE_CASE__ : Optional[int] = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE__ : str = train_result.metrics
SCREAMING_SNAKE_CASE__ : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
SCREAMING_SNAKE_CASE__ : int = min(__snake_case , len(__snake_case ) )
trainer.log_metrics("train" , __snake_case )
trainer.save_metrics("train" , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : Optional[int] = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
SCREAMING_SNAKE_CASE__ : int = min(__snake_case , len(__snake_case ) )
trainer.log_metrics("eval" , __snake_case )
trainer.save_metrics("eval" , __snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 704
|
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"{solution() = }")
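# --- Added sketch (not part of the original snippet) --------------------------
# Even-valued Fibonacci numbers satisfy E(k) = 4 * E(k-1) + E(k-2), so the even
# terms can be summed directly without generating the odd ones. The function
# name below is an illustrative assumption.
def solution_even_only(n: int = 4_000_000) -> int:
    a, b = 2, 8  # the first two even-valued Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total
# Both approaches should agree, e.g. solution() == solution_even_only().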
| 26
| 0
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum count of perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
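# --- Added sanity check (not part of the original snippet) --------------------
# By Lagrange's four-square theorem every natural number is the sum of at most
# four perfect squares, so the DP answer should never exceed 4 for positive
# inputs. The helper name is an illustrative assumption.
def _sanity_check_four_square_bound(limit: int = 50) -> None:
    for k in range(1, limit + 1):
        assert 1 <= minimum_squares_to_represent_a_number(k) <= 4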
| 382
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig ):
    model_type = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 382
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline ):
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
        super().__init__()
        if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file'''
            )
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
        if safety_checker is None:
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
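# --- Added usage sketch (not part of the original pipeline file) --------------
# Wiring the pipeline above from pretrained checkpoints; the checkpoint names
# below are illustrative assumptions, kept as comments since they trigger
# downloads:
#
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=seg_model,
#       segmentation_processor=seg_processor,
#   )
#   image = PIL.Image.open("photo.png").convert("RGB").resize((512, 512))
#   result = pipe(prompt="a red sofa", image=image, text="the old couch").images[0]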
| 666
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
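# --- Added sketch (not part of the original test file) ------------------------
# The repartition targets asserted above follow from simple size arithmetic:
# approximate bytes per row times rows, divided by max_shard_size, capped at the
# row count. The helper name is an illustrative assumption.
def _expected_partition_count(num_rows: int, bytes_per_row: int, max_shard_size: int) -> int:
    rows_per_shard = max(1, max_shard_size // bytes_per_row)
    shards = -(-num_rows // rows_per_shard)  # ceiling division
    return min(shards, num_rows)
# e.g. 100 rows of 8-byte ids with max_shard_size=16 -> 50 partitions, and
# max_shard_size=1 -> capped at 100 partitions, matching the asserts above.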
| 666
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig ):
    model_type = '''yolos'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1E-4
@property
    def default_onnx_opset( self ) -> int:
return 12
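# --- Added sketch (not part of the original file) -----------------------------
# With the default image_size of [512, 864] and patch_size 16, a YOLOS-style
# encoder sees a (512 // 16) x (864 // 16) patch grid plus one [CLS] token and
# `num_detection_tokens` learned detection tokens. The helper name is an
# illustrative assumption.
def _yolos_sequence_length(image_size=(512, 864), patch_size=16, num_detection_tokens=100) -> int:
    num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
    return num_patches + 1 + num_detection_tokens
# e.g. _yolos_sequence_length() == 32 * 54 + 1 + 100 == 1829 tokens.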
| 420
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ):
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def setUp( self ):
        """simple docstring"""
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    def test_inference_no_head( self ):
        """simple docstring"""
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 606
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="<unk>" ,sep_token="<sep>" ,pad_token="<pad>" ,cls_token="<cls>" ,mask_token="<mask>" ,bos_token="<s>" ,eos_token="</s>" ,clean_text=True ,tokenize_chinese_chars=True ,strip_accents=None ,wordpieces_prefix="##" ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,bos_token=bos_token ,eos_token=eos_token ,clean_text=clean_text ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,wordpieces_prefix=wordpieces_prefix ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" ,strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
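# --- Added sketch (not part of the original file) -----------------------------
# Funnel assigns token type id 2 (`cls_token_type_id` above) to the [CLS]
# position, unlike BERT's 0. The helper below just mirrors the arithmetic of
# create_token_type_ids_from_sequences for a single sequence of length 3; the
# name is an illustrative assumption.
def _funnel_token_type_sketch():
    cls_token_type_id, seq_len = 2, 3
    # [CLS] gets type 2; the sequence tokens plus the trailing [SEP] get type 0.
    type_ids = [cls_token_type_id] + (seq_len + 1) * [0]
    assert type_ids == [2, 0, 0, 0, 0]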
| 683
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
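# --- Added sketch (not part of the original file) -----------------------------
# A dependency-light illustration of the same recursive module-replacement
# pattern used above, swapping nn.Linear for nn.Identity instead of a
# bitsandbytes layer so it runs without a GPU. The function name is an
# illustrative assumption.
def _replace_linears_with_identity(model: nn.Module) -> nn.Module:
    for name, module in model.named_children():
        if isinstance(module, nn.Linear):
            setattr(model, name, nn.Identity())  # swap the leaf module in place
        elif len(list(module.children())) > 0:
            _replace_linears_with_identity(module)  # recurse into containers
    return model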
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def UpperCamelCase ( snake_case__):
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 683
| 1
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _a ( _lowerCamelCase="ro" , _lowerCamelCase="en" , _lowerCamelCase="wmt16" , _lowerCamelCase=None ) -> None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__snake_case : Dict = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
__snake_case : Tuple = datasets.load_dataset(_lowerCamelCase , _lowerCamelCase )
if save_dir is None:
__snake_case : Optional[Any] = F'''{dataset}-{pair}'''
__snake_case : List[Any] = Path(_lowerCamelCase )
save_dir.mkdir(exist_ok=_lowerCamelCase )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
__snake_case : List[str] = """val""" if split == """validation""" else split
__snake_case : List[Any] = save_dir.joinpath(F'''{fn}.source''' )
__snake_case : Union[str, Any] = save_dir.joinpath(F'''{fn}.target''' )
__snake_case : Union[str, Any] = src_path.open("""w+""" )
__snake_case : Dict = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__snake_case : Optional[Any] = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
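A minimal invocation sketch for the script above; the file name download_wmt.py is an assumption, and the flags map one-to-one onto the function arguments via fire:

# python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en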
| 26
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
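A minimal sketch of how these pieces compose; DemoMenu and the chosen key are illustrative assumptions, only mark/register/KeyHandler come from this module:

@register
class DemoMenu:
    @mark("q")
    def quit(self):
        return "quit"

# DemoMenu.handle_input() now reads one keypress via get_character() and
# dispatches "q" to the tagged quit method.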
| 465
| 0
|
'''simple docstring'''
__lowerCamelCase : int = """Tobias Carryer"""
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
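A quick determinism sketch for the generator above (same constants as the demo, but with a fixed seed instead of the time-based default):

lcg_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, 42)
lcg_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, 42)
assert all(lcg_a.next_number() == lcg_b.next_number() for _ in range(5))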
| 418
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __snake_case (__UpperCAmelCase="" ):
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(__UpperCAmelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        text = "Hey!"
        agent_type = AgentText(text)

        self.assertEqual(text, agent_type.to_string())
        self.assertEqual(text, agent_type.to_raw())
        self.assertEqual(text, agent_type)
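A condensed sketch of the round-trip the audio tests above exercise (assumes the same transformers agent types and soundfile are installed):

tensor = torch.rand(12, dtype=torch.float64) - 0.5
audio = AgentAudio(tensor)
wav_path = audio.to_string()  # serializes the samples to a .wav file on disk
recovered = audio.to_raw()    # tensor view of the same samples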
| 418
| 1
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
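A quick sanity check on get_distance: the origin lies inside the Mandelbrot set, so the loop never diverges, the normalized distance is exactly 1, and the black-and-white coloring maps it to black:

assert get_distance(0, 0, 50) == 1.0
assert get_black_and_white_rgb(get_distance(0, 0, 50)) == (0, 0, 0)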
| 649
|
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 649
| 1
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string into 1-26 numbers (A1Z26 cipher)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-26 numbers back into a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
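A tiny worked example of the A1Z26 mapping implemented above:

assert encode("abc") == [1, 2, 3]
assert decode([1, 2, 3]) == "abc"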
| 146
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
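For intuition on the hidden-state shape check in test_hidden_states_output: each of the five reported MobileViT stages halves the spatial resolution, so for the tester's 32-pixel inputs the expected feature-map sizes are, as a quick sketch:

image_size = 32
expected_sizes = [image_size // (2**stage) for stage in range(1, 6)]  # [16, 8, 4, 2, 1]
# the divisor ends at 64, and 64 // 2 equals the configured output_stride of 32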
| 146
| 1
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using a radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
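A small worked example of the class above: multiplying A(x) = 1 + 2x by B(x) = 3 + 4x should give 3 + 10x + 8x^2; the coefficients come back as complex numbers with roughly zero imaginary parts:

fft = FFT(poly_a=[1, 2], poly_b=[3, 4])
print(fft.product)  # expected ≈ [(3+0j), (10+0j), (8+0j)]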
| 104
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
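A minimal invocation sketch; the file name save_len_file.py and the tokenizer/data paths are placeholder assumptions, with the flags mapping onto the function arguments via fire:

# python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro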
| 54
| 0
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    # write a(i) as b * 10^k + c; ds_b and ds_c are the digit sums of b and c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
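The sequence computed here is a(n) = a(n-1) + digitsum(a(n-1)) starting from 1, i.e. 1, 2, 4, 8, 16, 23, 28, ... A brute-force reference for cross-checking small inputs (a sketch; its indexing may differ by one from solution(), which is tuned for n = 10**15):

def solution_brute_force(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term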
| 700
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 643
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of nitrogen gas in kg/mol (28 g/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
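As a sanity check on the formula v_rms = sqrt(3RT/M): for nitrogen, sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s, which is the order of magnitude expected for gas molecules at room temperature (note the molar mass must be in kg/mol, as the error message above states).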
| 375
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target) -> bool:
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_351_563,
'num_examples': 10_000,
},
{
'name': 'validation',
'num_bytes': 238_418,
'num_examples': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 375
| 1
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowercase__ = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 45
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
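A quick worked example of both helpers: [2, 4, 6] has a constant difference of 2 and a mean of (2 + 4 + 6) / 3:

assert is_arithmetic_series([2, 4, 6]) is True
assert arithmetic_mean([2, 4, 6]) == 4.0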
| 45
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope='session' )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ):
__a : Union[str, Any] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
__a : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
with open(os.path.join(tmpfs.local_root_dir , lowerCamelCase_ ) , 'w' ) as f:
f.write(lowerCamelCase_ )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] ):
__a : Optional[int] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
__a : Dict = input_paths[compression_format]
__a : str = tmp_path / 'cache'
__a : int = DownloadConfig(cache_dir=lowerCamelCase_ , extract_compressed_file=lowerCamelCase_ )
__a : Tuple = cached_path(lowerCamelCase_ , download_config=lowerCamelCase_ )
with open(lowerCamelCase_ ) as f:
__a : Tuple = f.read()
with open(lowerCamelCase_ ) as f:
__a : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
__a : List[Any] = 'custom_cache'
__a : List[str] = 'custom_extracted_dir'
__a : List[str] = tmp_path / 'custom_extracted_path'
if default_extracted:
__a : Tuple = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , lowerCamelCase_ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowerCamelCase_ ) )
__a : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a : str = xz_file
__a : List[str] = (
DownloadConfig(extract_compressed_file=lowerCamelCase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCamelCase_ )
)
__a : List[Any] = cached_path(lowerCamelCase_ , download_config=lowerCamelCase_ )
assert Path(lowerCamelCase_ ).parent.parts[-2:] == expected
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] ):
# absolute path
__a : Dict = str(Path(lowerCamelCase_ ).resolve() )
assert cached_path(lowerCamelCase_ ) == text_file
# relative path
__a : str = str(Path(lowerCamelCase_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase_ ) == text_file
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
# absolute path
__a : Optional[Any] = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(lowerCamelCase_ ):
cached_path(lowerCamelCase_ )
# relative path
__a : Tuple = './__missing_file__.txt'
with pytest.raises(lowerCamelCase_ ):
cached_path(lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ):
__a : List[str] = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase_ ) as f:
__a : List[Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCamelCase_ )
def UpperCAmelCase__ ( ):
with pytest.raises(lowerCamelCase_ ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ):
__a : List[str] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(lowerCamelCase_ ):
http_get('https://huggingface.co' , temp_file=lowerCamelCase_ )
with pytest.raises(lowerCamelCase_ ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] ):
__a : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(lowerCamelCase_ ):
ftp_get('ftp://huggingface.co' , temp_file=lowerCamelCase_ )
with pytest.raises(lowerCamelCase_ ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCamelCase_ )
def UpperCAmelCase__ ( lowerCamelCase_ : Any ):
__a : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(lowerCamelCase_ ):
fsspec_get('s3://huggingface.co' , temp_file=lowerCamelCase_ )
with pytest.raises(lowerCamelCase_ ):
fsspec_head('s3://huggingface.co' )
| 47
|
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (numbers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_00) = }''')
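The ugly-number sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so a quick check of the function above:

assert ugly_numbers(10) == 12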
| 143
| 0
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
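# `printflock` serializes prints from concurrent processes by taking an
# exclusive `flock` on this script file around each `print`, so per-rank
# output lines do not interleave mid-line.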
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 176
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """
    Copy/paste/tweak the original DPT weights to our DPT structure.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
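    # Example invocation (script name and paths are illustrative):
    #   python convert_dpt_to_pytorch.py \
    #       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    #       --pytorch_dump_folder_path ./dpt-large --show_prediction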
| 176
| 1
|
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverses the word order in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207
|
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 207
| 1
|
"""simple docstring"""
def find_min(arr):
    """
    Partitions `arr` into two subsets so that the difference of the subset sums
    is minimal, and returns that minimal difference.
    """
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True if some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
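# Example: find_min([1, 6, 11, 5]) == 1 -- the closest split is {1, 5, 6} (sum 12)
# versus {11} (sum 11).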
| 612
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
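# Note: entries in MAPPING may contain a "*" placeholder; it is replaced with the
# encoder layer index extracted from the fairseq parameter name (see
# load_wav2vec2_layer below).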
def read_txt_into_dict(filename):
    """Builds an id2label mapping: line number -> first word on that line."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
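    # Example invocation (script name and paths are illustrative):
    #   python convert_wav2vec2_checkpoint.py \
    #       --checkpoint_path ./wav2vec_small.pt --pytorch_dump_folder_path ./wav2vec2-base \
    #       --not_finetuned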
| 612
| 1
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class TrainingTests(unittest.TestCase):
"""simple docstring"""
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
@slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1_764, 2_646]]
        reshaped_input_size = [[683, 1_024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1_764, 2_646]]
        reshaped_input_size = [[683, 1_024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))

        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1_764, 2_646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1_764, 2_646]]
        reshaped_input_size = [[683, 1_024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 680
| 0
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
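# Example: fractional_knapsack([60, 100, 120], [10, 20, 30], 50) returns
# (240.0, [1, 1, 2/3]) -- items are taken greedily by value/weight ratio and
# only the last item picked may be fractional.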
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
from __future__ import annotations

import cv2
import numpy as np
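# Harris corner response: over each window, sum the gradient products
# (Ixx, Iyy, Ixy) to form the structure matrix M, then score the pixel with
# R = det(M) - k * trace(M)^2; large positive R indicates a corner.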
class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 196
| 0
|
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 14
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 184
| 0
|
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
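# Note: the sign comes from the parity of the prime factorization counted with
# multiplicity, e.g. 6 -> 1 (factors 2, 3) and 12 -> -1 (factors 2, 2, 3); this
# differs from the number-theoretic Möbius function for non-square-free inputs.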
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
|
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731  integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop the second operand
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop the first operand
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result onto stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 49
| 1
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        # Kept as a deliberate no-op in the original test suite.
        pass
| 402
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
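# Editor-added note (hedged): with the lazy module installed in sys.modules
# above, heavy submodules are imported only on first attribute access, e.g.:
#
#     from transformers.models.whisper import WhisperConfig  # triggers the real import lazily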
| 402
| 1
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
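# Editor-added sketch (hedged): the Moses-style punctuation/number splitting
# exercised in test_full_tokenizer_moses_numbers, in a single call:
#
#     TransfoXLTokenizer(lower_case=False).tokenize("Henry's $5,000")
#     # -> ["Henry", "'s", "$", "5", "@,@", "000"]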
| 345
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # value stored in this node
        self.next: Node[T] | None = None  # link to the node below in the stack

    def __str__(self):
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list."""

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
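# Editor-added usage sketch (hedged): exercising the LinkedStack defined above.
if __name__ == "__main__":
    demo: LinkedStack[int] = LinkedStack()
    demo.push(1)
    demo.push(2)
    assert str(demo) == "2->1"
    assert demo.peek() == 2
    assert demo.pop() == 2
    assert len(demo) == 1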
| 345
| 1
|
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # capacity must be positive
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit([10, 20, 30], [2, 4, 6], -15)

    def test_negative_weight_value(self):
        # negative weights are invalid
        with self.assertRaisesRegex(ValueError, "Weight can not be negative."):
            kp.calc_profit([10, 20, 30], [2, -4, 6], 15)

    def test_negative_profit_value(self):
        # negative profits are invalid
        with self.assertRaisesRegex(ValueError, "Profit can not be negative."):
            kp.calc_profit([10, -20, 30], [2, 4, 6], 15)

    def test_null_max_weight(self):
        # a missing capacity is rejected like a non-positive one
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit([10, 20, 30], [2, 4, 6], None)

    def test_unequal_list_length(self):
        # profit and weight lists must have the same length
        with self.assertRaisesRegex(IndexError, "The length of profit and weight must be same."):
            kp.calc_profit([10, 20, 30, 40], [2, 4, 6, 8, 10, 12], 100)
if __name__ == "__main__":
unittest.main()
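# Editor-added sketch (hedged): the direct call behind test_sorted above,
# assuming greedy_knapsack.calc_profit(profit, weight, max_weight) as imported:
#
#     kp.calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)  # -> 210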
| 441
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder,
            scheduler=scheduler, melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
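# Editor-added usage sketch (hedged): loading and running the pipeline above.
# The checkpoint id and the provenance of `input_tokens` (normally produced by
# a MIDI note/feature converter) are illustrative assumptions, not taken from
# this file.
#
#     import torch
#     from diffusers import SpectrogramDiffusionPipeline
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(input_tokens, generator=torch.manual_seed(0), num_inference_steps=100)
#     audio = output.audios[0]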
| 115
| 0
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
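# Editor-added sketch (hedged): composing the config above from explicit
# backbone/decoder configs via the classmethod.
#
#     from transformers import DetrConfig, SwinConfig
#
#     backbone = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     decoder = DetrConfig()
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)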
| 456
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) as the improper integral of x^(num - 1) * e^(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
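# Editor-added sanity check (hedged sketch): for positive integers,
# Gamma(n) == (n - 1)!, so gamma(5.0) should be very close to 24.0:
#
#     round(gamma(5.0), 6)  # -> 24.0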
| 456
| 1
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor: int):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
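# Editor-added note (hedged): the renaming above relies on both state dicts
# enumerating parameters in the same order. A quick way to spot-check that
# assumption before converting:
#
#     for old_key, new_key in list(mapping.items())[:5]:
#         print(old_key, "->", new_key)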
| 477
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 477
| 1
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase =False, False, False
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
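# Editor-added usage sketch (hedged): a typical round-trip with the feature
# above, assuming a local "test.wav" exists and `datasets` is installed.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"audio": ["test.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#     sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}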
| 255
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
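# Editor-added sketch (hedged): how these helpers compose in an interactive
# configuration flow; the prompt text is illustrative only.
#
#     use_cpu = _ask_field(
#         "Do you want to run on CPU only? [yes/NO]: ",
#         _convert_yes_no_to_bool,
#         default=False,
#         error_message="Please enter yes or no.",
#     )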
| 255
| 1
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 620
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
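# A minimal usage sketch (assuming this config is exposed as `DetaConfig` in
# transformers, alongside `DetaModel`):
#
#   from transformers import DetaConfig, DetaModel
#
#   config = DetaConfig()  # default ResNet backbone, 900 object queries
#   model = DetaModel(config)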
| 620
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
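    # Example invocation (all paths are placeholders; the script filename is assumed):
    #   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/xlnet_model.ckpt \
    #       --xlnet_config_file /path/to/xlnet_config.json \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --finetuning_task sst-2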
| 346
|
import csv
import tweepy
# Twitter API credentials
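# Note: fill in the four credential strings below before running. The standard
# Twitter API has historically returned at most roughly the 3,200 most recent
# tweets from user_timeline, so the pagination loop below cannot reach further back.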
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 346
| 1
|
"""simple docstring"""
from itertools import permutations
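# Project Euler problem 43: a 0-9 pandigital number d1 d2 ... d10 is kept when
# its 3-digit substrings d2d3d4 through d8d9d10 are divisible by 2, 3, 5, 7, 11,
# 13, 17 respectively; the answer is the sum of all such numbers. `num` below is
# a tuple of digits, e.g. (1, 4, 0, 6, 3, 5, 7, 2, 8, 9).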
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 102
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
def __init__( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str=13 , UpperCamelCase : Optional[Any]=[30, 30] , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Any=True , UpperCamelCase : List[Any]=True , UpperCamelCase : str=32 , UpperCamelCase : str=5 , UpperCamelCase : Dict=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : int=10 , UpperCamelCase : Any=0.02 , UpperCamelCase : str=3 , UpperCamelCase : Tuple=None , UpperCamelCase : List[Any]=8 , UpperCamelCase : str=10 , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : int = is_training
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : str = num_labels
lowerCAmelCase__ : List[Any] = scope
lowerCAmelCase__ : Dict = n_targets
lowerCAmelCase__ : Any = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCAmelCase__ : Any = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCAmelCase__ : Optional[int] = num_patches + 1 + self.num_detection_tokens
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCAmelCase__ : Tuple = []
for i in range(self.batch_size ):
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase )
lowerCAmelCase__ : Tuple = torch.rand(self.n_targets , 4 , device=UpperCamelCase )
labels.append(UpperCamelCase )
lowerCAmelCase__ : Any = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = YolosModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : int = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = YolosForObjectDetection(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(pixel_values=UpperCamelCase )
lowerCAmelCase__ : Dict = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCAmelCase__ : List[str] = model(pixel_values=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = config_and_inputs
lowerCAmelCase__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( a_ , a_ , unittest.TestCase ):
_lowerCamelCase :str = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCamelCase :Union[str, Any] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCamelCase :Any = False
_lowerCamelCase :Tuple = False
_lowerCamelCase :Tuple = False
_lowerCamelCase :Union[str, Any] = False
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : List[str]=False ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCAmelCase__ : str = []
for i in range(self.model_tester.batch_size ):
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : Optional[int] = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase , dtype=torch.long )
lowerCAmelCase__ : int = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase , dtype=torch.float )
labels.append(UpperCamelCase )
lowerCAmelCase__ : int = labels
return inputs_dict
def _lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = YolosModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
# YOLOS does not use inputs_embeds
pass
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Dict = True
# in YOLOS, the seq_len is different
lowerCAmelCase__ : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Tuple = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : str = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : int = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCAmelCase__ : int = len(UpperCamelCase )
# Check attention is always last and order is fine
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ):
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = outputs.hidden_states
lowerCAmelCase__ : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# YOLOS has a different seq_length
lowerCAmelCase__ : List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase )
@slow
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowercase_ ( ) -> List[Any]:
lowerCAmelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[Any] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(inputs.pixel_values )
# verify outputs
lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCAmelCase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCamelCase , )
lowerCAmelCase__ : Any = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1E-4 ) )
# verify postprocessing
lowerCAmelCase__ : Optional[int] = image_processor.post_process_object_detection(
UpperCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCAmelCase__ : Tuple = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(UpperCamelCase )
lowerCAmelCase__ : Tuple = [75, 75, 17, 63, 17]
lowerCAmelCase__ : Tuple = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCamelCase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCamelCase , atol=1E-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCamelCase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCamelCase ) )
| 299
| 0
|
'''simple docstring'''
from __future__ import annotations
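# Computes the median of the multiset union of two (not necessarily sorted)
# arrays by sorting their concatenation, which costs O((m + n) log(m + n)).
# Simple and correct; if both inputs were already sorted, a binary-search
# partition could reach O(log(min(m, n))) instead.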
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label=None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label):
        if not isinstance(label, list):
            label = list(label)
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels,
        guidance_scale: float = 4.0,
        generator=None,
        num_inference_steps: int = 50,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)

            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
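# Note on the loop above: with guidance_scale > 1 the batch carries a conditional
# and an unconditional copy of each latent, and classifier-free guidance combines
# the two noise estimates as
#   eps = eps_uncond + guidance_scale * (cond_eps - uncond_eps)
#
# A minimal usage sketch (assuming this class is exposed as `DiTPipeline` in
# diffusers and that the public `facebook/DiT-XL-2-256` checkpoint is used):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]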
| 99
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
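    # Example invocation (placeholder paths; the script filename is assumed):
    #   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
    #       --task WTQ --reset_position_index_per_cell \
    #       --tf_checkpoint_path /path/to/model.ckpt-0 \
    #       --tapas_config_file /path/to/bert_config.json \
    #       --pytorch_dump_path /path/to/output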
| 77
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])

        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 112
|
"""simple docstring"""
from __future__ import annotations
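# Bottom-up (iterative) merge sort: instead of recursing, adjacent runs of
# width p = 2, 4, 8, ... are merged until a single sorted run remains.
# O(n log n) comparisons, with O(n) auxiliary space used during each merge.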
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 112
| 1
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__( self : int , UpperCamelCase__ : Tensor ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
    def parametrized(self):
        """simple docstring"""
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
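    # Caveat: argparse's `type=bool` converts any non-empty string (including
    # "False") to True, so `--push_to_hub False` would still push; an explicit
    # `store_true`/`store_false` action is the usual fix.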
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 638
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
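# A minimal usage sketch (assuming the public `SwinConfig`/`SwinModel` names in
# transformers):
#
#   from transformers import SwinConfig, SwinModel
#
#   config = SwinConfig()  # swin-tiny style defaults: embed_dim=96, depths [2, 2, 6, 2]
#   model = SwinModel(config)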
class SwinOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
| 638
| 1
|
import os
import pytest
from attr import dataclass
lowerCamelCase__ : Optional[int] = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    '''simple docstring'''
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
return F"""{self.framework}-transfromers-test"""
@property
    def test_path(self):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 700
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    '''simple docstring'''

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
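# A minimal usage sketch (placeholder data path; the tokenizer choice is illustrative):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="/path/to/squad")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")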
| 208
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowerCAmelCase : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
def split_text( text , n=100 , character=" " ):
"""Split the text every ``n``-th occurrence of ``character``."""
words = text.split(character )
return [character.join(words[i : i + n] ).strip() for i in range(0 , len(words ) , n )]
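# Quick illustration of the splitter above: with n=3 (instead of the default
# 100-word passages), "a b c d e f g" is cut into ["a b c", "d e f", "g"].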
def split_documents( documents ):
"""Split documents into passages."""
titles , texts = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(text ):
titles.append(title if title is not None else """""" )
texts.append(passage )
return {"title": titles, "text": texts}
def embed( documents , ctx_encoder , ctx_tokenizer ):
"""Compute the DPR embeddings of document passages."""
input_ids = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=True , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args , ):
"""simple docstring"""
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
dataset = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
new_features = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
dataset = dataset.map(
partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
passages_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=index )
# And save the index
index_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
csv_path: str = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
question: Optional[str] = field(
default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
rag_model_name: str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
dpr_ctx_encoder_model_name: str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
output_dir: Optional[str] = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments:
num_proc: Optional[int] = field(
default=None , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
batch_size: int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class IndexHnswArguments:
d: int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
m: int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
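# A hedged usage sketch of querying the artifacts this script saves; the query
# vector below is a placeholder for a DPR question embedding, and
# `get_nearest_examples` is the standard `datasets` API for a dataset with a
# loaded faiss index.
# from datasets import load_from_disk
# import numpy as np
# dataset = load_from_disk(passages_path)
# dataset.load_faiss_index("embeddings", index_path)
# query = np.random.randn(768).astype("float32")
# scores, passages = dataset.get_nearest_examples("embeddings", query, k=5)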
| 644
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
config_cls = XGLMConfig
config_updates = {}
hidden_act = "gelu"
def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = d_model
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.ffn_dim = ffn_dim
self.activation_function = activation_function
self.activation_dropout = activation_dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = None
self.bos_token_id = 0
self.eos_token_id = 2
self.pad_token_id = 1
def get_large_model_config( self ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def prepare_config_and_inputs( self ):
input_ids = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
config = self.get_config()
head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def get_config( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
test_onnx = False
test_missing_keys = False
test_pruning = False
def setUp( self ):
self.model_tester = TFXGLMModelTester(self )
self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
def test_config( self ):
self.config_tester.run_common_tests()
@slow
def test_model_from_pretrained( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFXGLMModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def test_resize_token_embeddings( self ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest( unittest.TestCase ):
@slow
def test_lm_generate_xglm( self , verify_outputs=True ):
model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
input_ids = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
def test_xglm_sample( self ):
tokenizer = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
tokenized = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
input_ids = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
EXPECTED_OUTPUT_STR = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
def test_batch_generation( self ):
model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tokenizer = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
tokenizer.padding_side = """left"""
# use different length sentences to test batching
sentences = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True )
input_ids = inputs["""input_ids"""]
outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
expected_output_sentence = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(expected_output_sentence , batch_out_sentence )
self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
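# Why the test above sets padding_side = "left": decoder-only models such as
# XGLM continue generating from the last position of the input, so the shorter
# prompt in a batch must be padded on the left. With right padding the model
# would be asked to continue from <pad> tokens, and the batched and unbatched
# generations compared above would diverge.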
| 644
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
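# A minimal sketch of how the "*" wildcard in MAPPING above gets resolved: the
# layer index is read out of the fairseq parameter name and substituted into
# the HF key. The helper name is illustrative; the real logic lives inside
# load_wavaveca_layer below.
def resolve_wildcard(mapped_key: str, layer_index: str) -> str:
    # e.g. ("encoder.layers.*.feed_forward.intermediate_dense", "3")
    #   -> "encoder.layers.3.feed_forward.intermediate_dense"
    return mapped_key.replace("*", layer_index)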
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict( filename ):
result = {}
with open(filename ,"""r""" ) as file:
for line_number, line in enumerate(file ):
line = line.strip()
if line:
words = line.split()
key = line_number
value = words[0]
result[key] = value
return result
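# Worked example for read_txt_into_dict: a label file containing the lines
# "speech", "music" and "noise" yields {0: "speech", 1: "music", 2: "noise"},
# which is later attached to the config as its id-to-label mapping in the
# sequence-classification branch of the conversion function below.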
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Any ) -> Dict:
for attribute in key.split(""".""" ):
_lowerCAmelCase : Dict = getattr(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
_lowerCAmelCase : str = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase : str = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase : List[Any] = getattr(_lowerCamelCase ,_lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase : List[str] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase : Any = getattr(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase : List[Any] = value[0]
else:
_lowerCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase : int = value
elif weight_type == "weight_g":
_lowerCAmelCase : Dict = value
elif weight_type == "weight_v":
_lowerCAmelCase : int = value
elif weight_type == "bias":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase : str = getattr(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = value
else:
_lowerCAmelCase : str = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,_lowerCamelCase : str ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase : Union[str, Any] = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase : Tuple = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase : Union[str, Any] = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase : Optional[Any] = key
_lowerCAmelCase : List[Any] = value if """lm_head""" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Dict ,_lowerCamelCase : List[Any]=None ,_lowerCamelCase : Union[str, Any]=None ) -> Any:
_lowerCAmelCase : Any = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : Optional[int] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase : Optional[int] = True
if "*" in mapped_key:
_lowerCAmelCase : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase : Dict = mapped_key.replace("""*""" ,_lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase : List[str] = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase : int = """weight_v"""
elif "bias" in name:
_lowerCAmelCase : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : List[str] = """weight"""
else:
_lowerCAmelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
else:
set_recursively(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return is_used
return is_used
def recursively_load_weights( fairseq_model ,hf_model ,is_headless ):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name ,value ,feature_extractor ,unused_weights ,hf_model.config.feat_extract_norm == """group""" ,)
is_used = True
else:
is_used = load_wavaveca_layer(name ,value ,hf_model )
if not is_used:
unused_weights.append(name )
logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name ,value ,feature_extractor ,unused_weights ,use_group_norm ):
name = full_name.split("""conv_layers.""" )[-1]
items = name.split(""".""" )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,dict_path=None ,is_finetuned=True ,is_seq_class=False ):
if config_path is not None:
config = WavaVecaConfig.from_pretrained(config_path )
else:
config = WavaVecaConfig()
if is_seq_class:
idalabel = read_txt_into_dict(dict_path )
config.idalabel = idalabel
hf_wavavec = WavaVecaForSequenceClassification(config )
feature_extractor = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=True ,return_attention_mask=True ,)
feature_extractor.save_pretrained(pytorch_dump_folder_path )
elif is_finetuned:
if dict_path:
target_dict = Dictionary.load(dict_path )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols )
vocab_path = os.path.join(pytorch_dump_folder_path ,"""vocab.json""" )
if not os.path.isdir(pytorch_dump_folder_path ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
return
os.makedirs(pytorch_dump_folder_path ,exist_ok=True )
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict["""<pad>"""] = 0
vocab_dict["""<s>"""] = 1
with open(vocab_path ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(vocab_dict ,vocab_handle )
tokenizer = WavaVecaCTCTokenizer(
vocab_path ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=False ,)
return_attention_mask = True if config.feat_extract_norm == """layer""" else False
feature_extractor = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=True ,return_attention_mask=return_attention_mask ,)
processor = WavaVecaProcessor(feature_extractor=feature_extractor ,tokenizer=tokenizer )
processor.save_pretrained(pytorch_dump_folder_path )
hf_wavavec = WavaVecaForCTC(config )
else:
hf_wavavec = WavaVecaForPreTraining(config )
if is_finetuned or is_seq_class:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
task_arg = argparse.Namespace(task="""audio_pretraining""" )
task = fairseq.tasks.setup_task(task_arg )
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=task )
model = model[0].eval()
recursively_load_weights(model ,hf_wavavec ,not is_finetuned )
hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
args = parser.parse_args()
is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 663
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
def __init__( self , *args , **kwargs ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
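# Usage note: the shim above keeps old imports working while steering users to
# the new class; instantiating it still returns a fully functional processor,
# just with a deprecation warning, e.g.
# extractor = SegformerFeatureExtractor()  # warns, then behaves like SegformerImageProcessor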
| 663
| 1
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
A_ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 42
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ = 16
A_ = 32
def get_dataloaders( accelerator ,batch_size = 16 ,model_name_or_path = 'bert-base-cased' ):
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
datasets = load_dataset('glue' ,'mrpc' )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=True ,max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function ,batched=True ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples ,padding='max_length' ,max_length=128 ,return_tensors='pt' )
return tokenizer.pad(examples ,padding='longest' ,return_tensors='pt' )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['train'] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['validation'] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
return train_dataloader, eval_dataloader
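# Hedged usage sketch (names as defined above): the dataloaders are built from
# an existing Accelerator so that collate_fn can branch on its distributed type.
# accelerator = Accelerator()
# train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)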
def evaluation_loop( accelerator ,model ,eval_dataloader ,metric ):
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
predictions ,references = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(eval_dataloader ) - 1:
predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
references = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions ,references=references ,)
eval_metric = metric.compute()
return eval_metric["accuracy"]
def training_function( config ,args ):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['lr']
num_epochs = int(config['num_epochs'] )
seed = int(config['seed'] )
batch_size = int(config['batch_size'] )
model_name_or_path = args.model_name_or_path
set_seed(seed )
train_dataloader ,eval_dataloader = get_dataloaders(accelerator ,batch_size ,model_name_or_path )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path ,return_dict=True )
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters() ,lr=lr )
if accelerator.state.deepspeed_plugin is not None:
gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
gradient_accumulation_steps = 1
max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer ,num_warmup_steps=0 ,num_training_steps=max_training_steps ,)
else:
lr_scheduler = DummyScheduler(optimizer ,total_num_steps=max_training_steps ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler = accelerator.prepare(
model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
metric = evaluate.load('glue' ,'mrpc' )
ending_epoch = num_epochs
if args.partial_train_epoch is not None:
ending_epoch = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
epoch_string = args.resume_from_checkpoint.split('epoch_' )[1]
state_epoch_num = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
starting_epoch = int(state_epoch_num ) + 1
accuracy = evaluation_loop(accelerator ,model ,eval_dataloader ,metric )
accelerator.print('resumed checkpoint performance:' ,accuracy )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' ,lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizer\'s lr:' ,optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir ,f'''state_{starting_epoch-1}.json''' ) ,'r' ) as f:
resumed_state = json.load(f )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
state = {}
for epoch in range(starting_epoch ,ending_epoch ):
model.train()
for step, batch in enumerate(train_dataloader ):
outputs = model(**batch )
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
output_dir = f'''epoch_{epoch}'''
output_dir = os.path.join(args.output_dir ,output_dir )
accelerator.save_state(output_dir )
accuracy = evaluation_loop(accelerator ,model ,eval_dataloader ,metric )
state['accuracy'] = accuracy
state['lr'] = lr_scheduler.get_lr()[0]
state['optimizer_lr'] = optimizer.param_groups[0]['lr']
state['epoch'] = epoch
state['step'] = overall_step
accelerator.print(f'''epoch {epoch}:''' ,state )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,f'''state_{epoch}.json''' ) ,'w' ) as f:
json.dump(state ,f )
def main():
parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' ,type=str ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=False ,)
parser.add_argument(
'--output_dir' ,type=str ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
parser.add_argument(
'--resume_from_checkpoint' ,type=str ,default=None ,help='If the training should continue from a checkpoint folder.' ,)
parser.add_argument(
'--partial_train_epoch' ,type=int ,default=None ,help='If passed, the training will stop after this number of epochs.' ,)
parser.add_argument(
'--num_epochs' ,type=int ,default=2 ,help='Number of train epochs.' ,)
args = parser.parse_args()
config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(config ,args )
if __name__ == "__main__":
main()
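# Hedged sketch of the on-disk layout this script produces (directory names
# come from the f-strings above; the exact save_state contents depend on the
# installed accelerate version):
# output_dir/
#     epoch_0/       <- accelerator.save_state(...): model, optimizer, scheduler, RNG states
#     state_0.json   <- {"accuracy": ..., "lr": ..., "optimizer_lr": ..., "epoch": 0, "step": ...}
#     epoch_1/
#     state_1.json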
| 42
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
"""simple docstring"""
model_type = '''bridgetower_vision_model'''
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.stop_gradient = stop_gradient
self.share_layernorm = share_layernorm
self.remove_last_layer = remove_last_layer
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
if config_dict.get("model_type") == "bridgetower":
config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(config_dict , **kwargs)
class BridgeTowerTextConfig( PretrainedConfig ):
"""simple docstring"""
model_type = '''bridgetower_text_model'''
def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_factor = initializer_factor
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
if config_dict.get("model_type") == "bridgetower":
config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(config_dict , **kwargs)
class BridgeTowerConfig( PretrainedConfig ):
"""simple docstring"""
model_type = '''bridgetower'''
def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
# TODO: remove this once the Hub files are updated.
_ = kwargs.pop("text_config_dict" , None)
_ = kwargs.pop("vision_config_dict" , None)
super().__init__(**kwargs)
self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
self.hidden_act = hidden_act
self.hidden_size = hidden_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.share_link_tower_layers = share_link_tower_layers
self.link_tower_type = link_tower_type
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.tie_word_embeddings = tie_word_embeddings
self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
if text_config is None:
text_config = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
if vision_config is None:
vision_config = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
self.text_config = BridgeTowerTextConfig(**text_config)
self.vision_config = BridgeTowerVisionConfig(**vision_config)
@classmethod
def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs)
def to_dict( self ):
output = copy.deepcopy(self.__dict__)
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
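# A hedged composition sketch for the classmethod above: a combined config can
# be built from the two sub-configs (all values here are the defaults).
# text_cfg = BridgeTowerTextConfig()
# vision_cfg = BridgeTowerVisionConfig()
# cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)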
| 426
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
"""simple docstring"""
model_type = '''xglm'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , vocab_size=256008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.ffn_dim = ffn_dim
self.num_layers = num_layers
self.attention_heads = attention_heads
self.activation_function = activation_function
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.layerdrop = layerdrop
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.use_cache = use_cache
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 426
| 1
|
'''simple docstring'''
def binary_exponentiation( a , n , mod ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
else:
b = binary_exponentiation(a , n // 2 , mod )
return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
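# Worked check with tiny numbers: for p = 5 and b = 3, Fermat's little theorem
# gives b**(p-2) % p = 3**3 % 5 = 27 % 5 = 2, and indeed (3 * 2) % 5 == 1, so 2
# is the modular inverse of 3; dividing by b modulo p is multiplying by it.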
| 342
|
'''simple docstring'''
def solution( length : int = 50 ) -> int:
ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
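# The recurrence above counts Project Euler 114 tilings: every block of length
# >= 3 placed at the end of the row contributes the count for the prefix left
# of its mandatory separating square, plus one for the row with no blocks.
# For example, solution(7) == 17, the value quoted in the problem statement.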
if __name__ == "__main__":
print(f'{solution() = }')
| 342
| 1
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = AudioLDMPipeline
params = TEXT_TO_AUDIO_PARAMS
batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
text_encoder = ClapTextModelWithProjection(text_encoder_config )
tokenizer = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
vocoder_config = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
vocoder = SpeechTaHifiGan(vocoder_config )
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 2_56
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio[:10]
SCREAMING_SNAKE_CASE__ : Tuple = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : str = output.audios[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = 3 * [inputs.pop('''prompt''' )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe.tokenizer(
_lowercase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_inputs["input_ids"].to(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe.text_encoder(
_lowercase , )
SCREAMING_SNAKE_CASE__ : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE__ : List[Any] = F.normalize(_lowercase , dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE__ : List[Any] = audioldm_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = audioldm_pipe.to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE__ : str = negative_prompt
SCREAMING_SNAKE_CASE__ : List[Any] = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = output.audios[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = 3 * [inputs.pop('''prompt''' )]
SCREAMING_SNAKE_CASE__ : List[str] = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe.tokenizer(
_lowercase , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : List[str] = text_inputs["input_ids"].to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe.text_encoder(
_lowercase , )
SCREAMING_SNAKE_CASE__ : Any = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE__ : Any = F.normalize(_lowercase , dim=-1 )
embeds.append(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = embeds
# forward
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Tuple = PNDMScheduler(skip_prk_steps=_lowercase )
SCREAMING_SNAKE_CASE__ : int = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = "egg cracking"
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe(**_lowercase , negative_prompt=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 2_56
SCREAMING_SNAKE_CASE__ : int = audio[:10]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = PNDMScheduler(skip_prk_steps=_lowercase )
SCREAMING_SNAKE_CASE__ : str = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : int = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE__ : Optional[int] = audioldm_pipe(_lowercase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE__ : Dict = 2
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe(_lowercase , num_inference_steps=2 , num_waveforms_per_prompt=_lowercase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE__ : Any = 2
SCREAMING_SNAKE_CASE__ : Dict = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_lowercase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Any = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = audioldm_pipe(audio_length_in_s=0.016 , **_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_lowercase ) / vocoder_sampling_rate == 0.032
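# Sketch of the arithmetic behind the two assertions above: the pipeline sizes its output so
# that len(audio) == audio_length_in_s * sampling_rate. Assuming the dummy vocoder's sampling
# rate is 16000 Hz (an illustrative assumption), audio_length_in_s=0.016 gives
# int(0.016 * 16000) == 256 samples and 0.032 gives 512.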
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AudioLDMPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = ["hey"]
SCREAMING_SNAKE_CASE__ : Any = audioldm_pipe(_lowercase , num_inference_steps=1 )
SCREAMING_SNAKE_CASE__ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
SCREAMING_SNAKE_CASE__ : str = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE__ : List[Any] = SpeechTaHifiGan(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audioldm_pipe(_lowercase , num_inference_steps=1 )
SCREAMING_SNAKE_CASE__ : List[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def lowercase__ ( self : Union[str, Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase )
def lowercase__ ( self : Dict ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_lowercase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase__ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase )
@slow
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , _lowercase : Dict="cpu" , _lowercase : str=torch.floataa , _lowercase : Union[str, Any]=0 ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.random.RandomState(_lowercase ).standard_normal((1, 8, 1_28, 16) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
SCREAMING_SNAKE_CASE__ : int = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self.get_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = 25
SCREAMING_SNAKE_CASE__ : Dict = audioldm_pipe(**_lowercase ).audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 8_19_20
SCREAMING_SNAKE_CASE__ : Any = audio[7_72_30:7_72_40]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audioldm_pipe.to(_lowercase )
audioldm_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : int = self.get_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = audioldm_pipe(**_lowercase ).audios[0]
assert audio.ndim == 1
assert len(_lowercase ) == 8_19_20
SCREAMING_SNAKE_CASE__ : Any = audio[2_77_80:2_77_90]
SCREAMING_SNAKE_CASE__ : Tuple = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 706
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ) -> Any:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ) -> Any:
'''simple docstring'''
maxes = np.max(_outputs , axis=-1 , keepdims=True )
shifted_exp = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
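# Sketch of why the max is subtracted above: softmax is shift-invariant, and subtracting the
# row-wise max keeps np.exp from overflowing, e.g.
# logits = np.array([[1000.0, 1001.0]])
# softmax(logits ) # -> approx [[0.2689, 0.7311]] instead of nan from exp(1000) overflow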
class ClassificationFunction( ExplicitEnum ):
SIGMOID = '''sigmoid'''
SOFTMAX = '''softmax'''
NONE = '''none'''
@add_end_docstrings(
PIPELINE_INIT_ARGS , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class lowercase ( Pipeline ):
lowerCamelCase : Optional[int] = False
lowerCamelCase : str = ClassificationFunction.NONE
def __init__( self : Dict , **_lowercase : Any ):
super().__init__(**_lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowercase__ ( self : Any , _lowercase : Dict=None , _lowercase : str=None , _lowercase : int="" , **_lowercase : Optional[int] ):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_kwargs
SCREAMING_SNAKE_CASE__ : Any = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
SCREAMING_SNAKE_CASE__ : str = self.model.config.return_all_scores
if isinstance(_lowercase , _lowercase ) or top_k is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = top_k
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated. If you want a similar functionality, use `top_k=None` instead of'''
''' `return_all_scores=True`, or `top_k=1` instead of `return_all_scores=False`.''' , _lowercase , )
if return_all_scores:
SCREAMING_SNAKE_CASE__ : Tuple = None
else:
SCREAMING_SNAKE_CASE__ : int = 1
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE__ : Tuple = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , *_lowercase : str , **_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Any = super().__call__(*_lowercase , **_lowercase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_legacy = '''top_k''' not in kwargs
if isinstance(args[0] , _lowercase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def lowercase__ ( self : List[Any] , _lowercase : List[Any] , **_lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.framework
if isinstance(_lowercase , _lowercase ):
return self.tokenizer(**_lowercase , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1 and isinstance(inputs[0] , _lowercase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs. If you are trying to send text pairs, send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` instead.''' )
return self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
def lowercase__ ( self : Union[str, Any] , _lowercase : List[Any] ):
return self.model(**_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Dict , _lowercase : Optional[int]=None , _lowercase : Optional[int]=1 , _lowercase : str=True ):
# `_legacy` is used to determine whether we're running the naked pipeline in backward-
# compatibility mode, or the newer behavior triggered by `pipeline(..., top_k=1)`, which
# returns the more natural list result.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE__ : List[str] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE__ : Tuple = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE__ : Tuple = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE__ : Tuple = model_outputs['''logits'''][0]
SCREAMING_SNAKE_CASE__ : Tuple = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sigmoid(_lowercase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE__ : Dict = softmax(_lowercase )
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE__ : Any = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE__ : Tuple = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(_lowercase )
]
if not _legacy:
dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = dict_scores[:top_k]
return dict_scores
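# Usage sketch for the pipeline defined above (hypothetical, assuming the standard
# `transformers.pipeline` factory and an SST-2 sequence-classification checkpoint):
# from transformers import pipeline
# clf = pipeline("text-classification" , model="distilbert-base-uncased-finetuned-sst-2-english" )
# clf("I love this!" ) # legacy path: [{'label': ..., 'score': ...}]
# clf("I love this!" , top_k=None ) # all class scores, sorted descending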
| 250
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True ) -> Optional[int]:
model.train()
_lowercase : List[Any] = model(lowerCamelCase_ )
_lowercase : str = F.mse_loss(lowerCamelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase_ )
def get_training_setup( lowerCamelCase_ , lowerCamelCase_=False ) -> Tuple:
set_seed(42 )
_lowercase : List[str] = RegressionModel()
_lowercase : Union[str, Any] = deepcopy(lowerCamelCase_ )
_lowercase : str = RegressionDataset(length=80 )
_lowercase : Dict = DataLoader(lowerCamelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
_lowercase : int = AdamW(params=model.parameters() , lr=1e-3 )
_lowercase : Any = AdamW(params=ddp_model.parameters() , lr=1e-3 )
_lowercase : Optional[int] = LambdaLR(lowerCamelCase_ , lr_lambda=lambda epoch : epoch**0.65 )
_lowercase : int = LambdaLR(lowerCamelCase_ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
_lowercase , _lowercase , _lowercase , _lowercase : int = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync( lowerCamelCase_ ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
_lowercase , _lowercase , _lowercase : Optional[int] = get_training_setup(lowerCamelCase_ )
# Use a single batch
_lowercase , _lowercase : int = next(iter(lowerCamelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
# Sync grads
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowercase : str = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
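# Sketch of the `no_sync` pattern exercised above (assumes an `accelerator`, a prepared
# `ddp_model`, and a computed `loss`): under DDP the context manager skips the gradient
# all-reduce for that backward pass.
# with accelerator.no_sync(ddp_model ):
# accelerator.backward(loss ) # grads accumulate locally, no cross-process sync
# accelerator.backward(loss ) # outside the context, grads are synchronized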
def test_distributed_sync( lowerCamelCase_ ) -> str:
# Test on distributed setup that context manager behaves properly
_lowercase , _lowercase , _lowercase : str = get_training_setup(lowerCamelCase_ )
# Use a single batch
_lowercase , _lowercase : Tuple = next(iter(lowerCamelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : Any = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
# Sync grads
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowercase : Tuple = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
def test_gradient_accumulation( lowerCamelCase_=False , lowerCamelCase_=False ) -> Optional[Any]:
_lowercase : Dict = Accelerator(
split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowercase , _lowercase , _lowercase : Optional[Any] = get_training_setup(lowerCamelCase_ )
for iteration, batch in enumerate(lowerCamelCase_ ):
_lowercase , _lowercase : Any = batch.values()
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : int = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowercase : List[str] = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( lowerCamelCase_=False , lowerCamelCase_=False ) -> Dict:
_lowercase : Optional[int] = Accelerator(
split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = get_training_setup(lowerCamelCase_ , lowerCamelCase_ )
for iteration, batch in enumerate(lowerCamelCase_ ):
_lowercase , _lowercase : Any = batch.values()
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : Any = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
_lowercase : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
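# Minimal sketch of the user-facing `accumulate` pattern these tests verify (assumes an
# Accelerator created with gradient_accumulation_steps and prepared objects; `compute_loss`
# is a hypothetical helper):
# for batch in dataloader:
# with accelerator.accumulate(model ):
# loss = compute_loss(model , batch )
# accelerator.backward(loss )
# optimizer.step() ; scheduler.step() ; optimizer.zero_grad()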
def test_dataloader_break( ) -> Dict:
_lowercase : Any = Accelerator()
_lowercase : Dict = RegressionDataset(length=80 )
_lowercase : Any = DataLoader(lowerCamelCase_ , batch_size=16 )
_lowercase : Union[str, Any] = RegressionDataset(length=96 )
_lowercase : Optional[Any] = DataLoader(lowerCamelCase_ , batch_size=16 )
_lowercase , _lowercase : Tuple = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase_ )
if iteration < len(lowerCamelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase_ )
if batch_num < len(lowerCamelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main( ) -> Tuple:
_lowercase : List[str] = Accelerator()
_lowercase : List[Any] = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(lowerCamelCase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(lowerCamelCase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowerCamelCase_ , lowerCamelCase_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase_ , lowerCamelCase_ )
def _mp_fn( lowerCamelCase_ ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
A__ : Optional[int] = ['''torch''', '''scipy''']
def __init__( self : Any , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def __UpperCAmelCase ( cls : Dict , *__lowerCamelCase : List[str] , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def __UpperCAmelCase ( cls : int , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
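# Sketch of how this dummy behaves: importing it always succeeds, but instantiating it (or
# calling either classmethod) raises via `requires_backends` when torch/scipy are missing.
# The concrete name below is hypothetical:
# obj = SomeTorchScipyObject() # raises an ImportError-style message without torch/scipy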
| 103
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : List[Any] = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
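# Sketch of the lazy-import behavior set up above: `_LazyModule` defers the heavy submodule
# imports until first attribute access (assuming this file is the package's `__init__`):
# from transformers.models.m2m_100 import M2M100Config # resolved lazily on access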
| 711
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='CLIPImageProcessor'
__a =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self : str , __a : List[Any]=None , __a : Any=None , **__a : int ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : List[Any] , __a : Optional[int]=None , __a : Dict=None , __a : List[Any]=None , **__a : Dict ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_a = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : Tuple , *__a : List[Any] , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[Any] , *__a : Union[str, Any] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
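# Usage sketch for the processor defined above (hypothetical checkpoint id, assuming a saved
# CLIP image processor + XLM-R tokenizer pair):
# processor = __SCREAMING_SNAKE_CASE.from_pretrained("org/checkpoint" ) # class defined above
# enc = processor(text=["a photo of a cat"] , images=img , return_tensors="pt" )
# sorted(enc.keys() ) # ['attention_mask', 'input_ids', 'pixel_values']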
| 521
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( a ):
'''simple docstring'''
if isinstance(a , collections.abc.Iterable ):
return a
return (a, a)
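# Sketch: the helper above normalizes scalars to pairs while passing iterables through:
# to_atuple(224 ) # -> (224, 224)
# to_atuple((224, 96) ) # -> (224, 96)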
@require_flax
class _UpperCAmelCase :
def _snake_case ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Any):
pass
def _snake_case ( self : List[Any]):
pass
def _snake_case ( self : Optional[Any]):
pass
def _snake_case ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : np.ndarray , UpperCAmelCase : float):
SCREAMING_SNAKE_CASE_ :str = np.abs((a - b)).max()
self.assertLessEqual(lowerCamelCase__ , lowerCamelCase__ , F"Difference between torch and flax is {diff} (>= {tol}).")
def _snake_case ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[str]):
SCREAMING_SNAKE_CASE_ :Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :int = FlaxVisionTextDualEncoderModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _snake_case ( self : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[int]):
SCREAMING_SNAKE_CASE_ :Dict = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :int = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :str = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _snake_case ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : str=None , **UpperCAmelCase : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :str = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ :Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :List[str] = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[Any] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :int = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = after_output[0] # out_b, the output after reload
SCREAMING_SNAKE_CASE_ :int = np.amax(np.abs(out_a - out_b))
self.assertLessEqual(lowerCamelCase__ , 1E-3)
def _snake_case ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any=None , **UpperCAmelCase : Optional[Any]):
SCREAMING_SNAKE_CASE_ :str = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :List[str] = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ :List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :List[Any] = model(
input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_attentions=lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :List[Any] = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase__) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ :Optional[int] = to_atuple(vision_model.config.image_size)
SCREAMING_SNAKE_CASE_ :Optional[Any] = to_atuple(vision_model.config.patch_size)
SCREAMING_SNAKE_CASE_ :Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE_ :Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
SCREAMING_SNAKE_CASE_ :Any = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase__) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
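# Sketch of the patch arithmetic used above (illustrative numbers): with image_size=(32, 32)
# and patch_size=(16, 16), num_patches = (32 // 16) * (32 // 16) = 4, so seq_len = 4 + 1 = 5
# once the [CLS] token is prepended.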
def _snake_case ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict):
pt_model.to(lowerCamelCase__)
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE_ :Optional[int] = inputs_dict
SCREAMING_SNAKE_CASE_ :List[str] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE_ :Dict = pt_model(**lowerCamelCase__).to_tuple()
SCREAMING_SNAKE_CASE_ :Optional[Any] = fx_model(**lowerCamelCase__).to_tuple()
self.assertEqual(len(lowerCamelCase__) , len(lowerCamelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[int] = fx_model_loaded(**lowerCamelCase__).to_tuple()
self.assertEqual(len(lowerCamelCase__) , len(lowerCamelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Tuple = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__)
pt_model_loaded.to(lowerCamelCase__)
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ :Tuple = pt_model_loaded(**lowerCamelCase__).to_tuple()
self.assertEqual(len(lowerCamelCase__) , len(lowerCamelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase__ , pt_output_loaded.numpy() , 4E-2)
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Dict = VisionTextDualEncoderModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :List[str] = FlaxVisionTextDualEncoderModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :List[str] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
def _snake_case ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict):
SCREAMING_SNAKE_CASE_ :Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = VisionTextDualEncoderModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Any = FlaxVisionTextDualEncoderModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[int] = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase__)
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase__)
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase__)
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase__)
@is_pt_flax_cross_test
def _snake_case ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ :str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ :int = config_inputs_dict.pop("vision_config")
SCREAMING_SNAKE_CASE_ :List[str] = config_inputs_dict.pop("text_config")
SCREAMING_SNAKE_CASE_ :int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
self.check_equivalence_flax_to_pt(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
@slow
def _snake_case ( self : str):
SCREAMING_SNAKE_CASE_ :Optional[int] = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE_ :Dict = model_a(**lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :str = model_a(**lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[int] = after_outputs[0] # out_b, the output after reload
SCREAMING_SNAKE_CASE_ :Union[str, Any] = np.amax(np.abs(out_a - out_b))
self.assertLessEqual(lowerCamelCase__ , 1E-5)
@require_flax
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
SCREAMING_SNAKE_CASE_ :Tuple = 13
SCREAMING_SNAKE_CASE_ :Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
SCREAMING_SNAKE_CASE_ :str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
SCREAMING_SNAKE_CASE_ :Optional[Any] = random_attention_mask([batch_size, 4])
SCREAMING_SNAKE_CASE_ :Union[str, Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _snake_case ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]):
SCREAMING_SNAKE_CASE_ :Any = FlaxViTModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Optional[Any] = FlaxBertModel(lowerCamelCase__)
return vision_model, text_model
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :Optional[Any] = FlaxViTModelTester(self)
SCREAMING_SNAKE_CASE_ :Tuple = FlaxBertModelTester(self)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ :Dict = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ :int = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ :int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
SCREAMING_SNAKE_CASE_ :str = 13
SCREAMING_SNAKE_CASE_ :int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
SCREAMING_SNAKE_CASE_ :Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
SCREAMING_SNAKE_CASE_ :Optional[int] = random_attention_mask([batch_size, 4])
SCREAMING_SNAKE_CASE_ :Union[str, Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _snake_case ( self : Any , UpperCAmelCase : int , UpperCAmelCase : Dict):
SCREAMING_SNAKE_CASE_ :str = FlaxCLIPVisionModel(lowerCamelCase__)
SCREAMING_SNAKE_CASE_ :Dict = FlaxBertModel(lowerCamelCase__)
return vision_model, text_model
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :Dict = FlaxCLIPVisionModelTester(self)
SCREAMING_SNAKE_CASE_ :Tuple = FlaxBertModelTester(self)
SCREAMING_SNAKE_CASE_ :Tuple = clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ :Optional[int] = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ :Any = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ :Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
SCREAMING_SNAKE_CASE_ :Optional[int] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
SCREAMING_SNAKE_CASE_ :List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
SCREAMING_SNAKE_CASE_ :List[str] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="np")
SCREAMING_SNAKE_CASE_ :Optional[Any] = model(**lowerCamelCase__)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE_ :Optional[Any] = np.array([[1.2284727, 0.3104122]])
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCamelCase__ , atol=1E-3))
| 631
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path( suffix="" ):
directory = tempfile.mkdtemp()
return os.path.join(directory , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_UpperCamelCase : Optional[int] = AgentAudio(lowerCamelCase__ )
_UpperCamelCase : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type.to_raw() ,atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ ,torch.tensor(lowerCamelCase__ ) ,atol=1E-4 ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_UpperCamelCase : Any = get_new_path(suffix='.wav' )
sf.write(lowerCamelCase__ ,lowerCamelCase__ ,16000 )
_UpperCamelCase : List[Any] = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type.to_raw() ,atol=1E-4 ) )
self.assertEqual(agent_type.to_string() ,lowerCamelCase__ )
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = torch.randint(0 ,256 ,(64, 64, 3) )
_UpperCamelCase : Optional[Any] = AgentImage(lowerCamelCase__ )
_UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type._tensor ,atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : str = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_UpperCamelCase : Tuple = Image.open(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_UpperCamelCase : Union[str, Any] = Image.open(lowerCamelCase__ )
_UpperCamelCase : List[Any] = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[Any] = 'Hey!'
_UpperCamelCase : Optional[int] = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,agent_type.to_string() )
self.assertEqual(lowerCamelCase__ ,agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ ,agent_type )
| 195
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : List[str] = TextToVideoSDPipeline
__A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__A : int = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCamelCase : Any = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase : List[str] = CLIPTextModel(__A )
lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase : List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , __A , __A=0 ):
"""simple docstring"""
if str(__A ).startswith("mps" ):
lowerCamelCase : Any = torch.manual_seed(__A )
else:
lowerCamelCase : Tuple = torch.Generator(device=__A ).manual_seed(__A )
lowerCamelCase : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : Optional[int] = TextToVideoSDPipeline(**__A )
lowerCamelCase : Union[str, Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__A )
lowerCamelCase : Tuple = "np"
lowerCamelCase : Optional[int] = sd_pipe(**__A ).frames
lowerCamelCase : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCamelCase : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__A , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
lowerCamelCase : List[str] = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowerCamelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase : Optional[int] = pipe.to("cuda" )
lowerCamelCase : Tuple = "Spiderman is surfing"
lowerCamelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : Union[str, Any] = pipe(__A , generator=__A , num_inference_steps=25 , output_type="pt" ).frames
lowerCamelCase : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
lowerCamelCase : List[str] = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowerCamelCase : str = pipe.to("cuda" )
lowerCamelCase : Tuple = "Spiderman is surfing"
lowerCamelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : str = pipe(__A , generator=__A , num_inference_steps=2 , output_type="pt" ).frames
lowerCamelCase : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
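# Usage sketch mirroring the slow tests above (checkpoint name taken from the tests; a CUDA
# device is assumed):
# pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ).to("cuda" )
# frames = pipe("Spiderman is surfing" , num_inference_steps=25 , output_type="pt" ).frames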
| 231
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
lowerCamelCase : List[str] = XLNetConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Tuple = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowerCamelCase : int = finetuning_task
lowerCamelCase : int = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowerCamelCase : Union[str, Any] = XLNetForSequenceClassification(SCREAMING_SNAKE_CASE_ )
elif "squad" in finetuning_task:
lowerCamelCase : List[Any] = finetuning_task
lowerCamelCase : str = XLNetForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase : int = XLNetLMHeadModel(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(f"""Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE_ )}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(f"""Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE_ )}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
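# Editor's note: a hypothetical invocation of the converter above. The script
# filename and checkpoint/config paths are placeholders, not files shipped with
# this code; only the flag names come from the argparse setup above.
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b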
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
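# Editor's note (sketch of the mechanism, not part of the original file): at
# runtime the module object is replaced by the _LazyModule above, so
# `from <package>.models.ernie import ErnieModel` defers importing
# modeling_ernie (and hence torch) until the attribute is first accessed;
# under TYPE_CHECKING, static analyzers see the real imports instead.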
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    """simple docstring"""

    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """simple docstring"""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        '''simple docstring'''
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        '''simple docstring'''
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        '''simple docstring'''
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    '''simple docstring'''
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    '''simple docstring'''
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        '''simple docstring'''
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    '''simple docstring'''
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
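# Editor's sketch (not in the original module): how the pieces above compose.
# A scheduler-like stand-in (hypothetical, built from SimpleNamespace) supplies
# `dtype` and `config`; CommonSchedulerState.create derives the beta/alpha
# tables, and add_noise_common mixes clean samples with noise per timestep.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_scheduler = SimpleNamespace(
        dtype=jnp.float32,
        config=SimpleNamespace(
            trained_betas=None,
            beta_schedule="linear",
            beta_start=0.0001,
            beta_end=0.02,
            num_train_timesteps=1000,
        ),
    )
    state = CommonSchedulerState.create(fake_scheduler)
    samples = jnp.ones((2, 3, 8, 8))
    noise = jnp.zeros_like(samples)
    timesteps = jnp.array([10, 500])
    noisy = add_noise_common(state, samples, noise, timesteps)
    print(noisy.shape)  # (2, 3, 8, 8)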
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F"""{solution() = }""")
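# Editor's note: quick sanity check — the primes below 10 are 2, 3, 5 and 7,
# so solution(10) == 17.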
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
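# Editor's note: the candidates generated above are the successive cube
# differences (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1, i.e. 7, 19, 37, 61, 91, ...
# For example solution(100) == 4, since 7, 19, 37 and 61 are prime while
# 91 == 7 * 13 is not.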
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
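# Editor's sketch (hypothetical values, results depend on the environment):
#   compare_versions("numpy", ">=", "1.20.0")   # checks the installed numpy
#   is_torch_version("<", "2.0.0")              # checks the installed torch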
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    '''simple docstring'''
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
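# Editor's note: with a small limit the search can be checked by hand —
# solution(3, 7, 8) == 2, because 2/5 is the largest fraction below 3/7
# with denominator at most 8.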
'''simple docstring'''
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
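# Editor's sketch (not in the original file): point updates with max queries
# over the half-open range [left, right).
#   tree = MaxFenwickTree(5)
#   tree.update(1, 20)
#   tree.update(4, 15)
#   tree.query(0, 5)   # -> 20
#   tree.query(2, 5)   # -> 15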
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    '''simple docstring'''
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still edges, take an arbitrary edge (from_node, to_node),
    # add both its endpoints to chosen_vertices, and then remove every edge
    # adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    '''simple docstring'''
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
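# Editor's note: this is the classic matching-based heuristic — every popped
# edge must have at least one endpoint in any vertex cover, and both of its
# endpoints are taken, so the returned cover is at most twice the optimum.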
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
"""simple docstring"""
def solution() -> str:
    '''simple docstring'''
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
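# Editor's note (hypothetical alternative, not the original approach): the last
# ten digits can also be accumulated modularly, avoiding big-integer growth:
#   sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
# zero-padded to ten digits, this matches str(total)[-10:].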
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance