| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def compiled_revision(self):
"""
Reads the compiled revision from the revision file.
Returns:
the revision of this vocabulary (i.e. the string
inside the revision file), or None if is_compiled
is False
"""
if not self.is_compiled:
return None
with open(self.revision_file, 'r') as f:
revision = f.read().strip()
self._logger.debug("compiled_revision is '%s'", revision)
return revision
|
Reads the compiled revision from the revision file.
Returns:
the revision of this vocabulary (i.e. the string
inside the revision file), or None if is_compiled
is False
|
compiled_revision
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
def compile(self, phrases, force=False):
"""
Compiles this vocabulary. If the force argument is True, compilation
will be forced regardless of necessity (which means that the
preliminary check if the current revision already equals the
revision after compilation will be skipped).
This method is not meant to be overridden by subclasses - use the
_compile_vocabulary()-method instead.
Arguments:
phrases -- a list of phrases that this vocabulary will contain
force -- (optional) forces compilation (Default: False)
Returns:
The revision of the compiled vocabulary
"""
revision = self.phrases_to_revision(phrases)
if not force and self.compiled_revision == revision:
self._logger.debug('Compilation not necessary, compiled ' +
'version matches phrases.')
return revision
if not os.path.exists(self.path):
self._logger.debug("Vocabulary dir '%s' does not exist, " +
"creating...", self.path)
try:
os.makedirs(self.path)
except OSError:
self._logger.error("Couldn't create vocabulary dir '%s'",
self.path, exc_info=True)
raise
try:
with open(self.revision_file, 'w') as f:
f.write(revision)
except (OSError, IOError):
self._logger.error("Couldn't write revision file in '%s'",
self.revision_file, exc_info=True)
raise
else:
self._logger.info('Starting compilation...')
try:
self._compile_vocabulary(phrases)
except Exception as e:
self._logger.error("Fatal compilation Error occured, " +
"cleaning up...", exc_info=True)
try:
os.remove(self.revision_file)
except OSError:
pass
raise e
else:
self._logger.info('Compilation done.')
return revision
|
Compiles this vocabulary. If the force argument is True, compilation
will be forced regardless of necessity (which means that the
preliminary check if the current revision already equals the
revision after compilation will be skipped).
This method is not meant to be overridden by subclasses - use the
_compile_vocabulary()-method instead.
Arguments:
phrases -- a list of phrases that this vocabulary will contain
force -- (optional) forces compilation (Default: False)
Returns:
The revision of the compiled vocabulary
|
compile
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
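The revision check in compile() only works if phrases_to_revision turns a phrase list into a stable fingerprint. A minimal sketch of that idea, assuming an MD5 hash over the sorted, joined phrases (the helper body here is illustrative; the actual implementation lives elsewhere in vocabcompiler.py):

```python
import hashlib


def phrases_to_revision(phrases):
    # A stable fingerprint: the same phrase set always maps to the same
    # revision string, so compile() can skip work when nothing changed.
    blob = '\n'.join(sorted(phrases))
    return hashlib.md5(blob.encode('utf-8')).hexdigest()


# The revision is order-independent and changes only when the phrase set changes.
assert phrases_to_revision(['TIME', 'WEATHER']) == phrases_to_revision(['WEATHER', 'TIME'])
assert phrases_to_revision(['TIME']) != phrases_to_revision(['TIME', 'WEATHER'])
```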
def _compile_vocabulary(self, phrases):
"""
Abstract method that should be overridden in subclasses with custom
compilation code.
Arguments:
phrases -- a list of phrases that this vocabulary will contain
"""
|
Abstract method that should be overridden in subclasses with custom
compilation code.
Arguments:
phrases -- a list of phrases that this vocabulary will contain
|
_compile_vocabulary
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
def is_compiled(self):
"""
Checks if the vocabulary is compiled by checking if the revision,
languagemodel and dictionary files are readable.
Returns:
True if this vocabulary has been compiled, else False
"""
return (super(self.__class__, self).is_compiled and
os.access(self.languagemodel_file, os.R_OK) and
os.access(self.dictionary_file, os.R_OK))
|
Checks if the vocabulary is compiled by checking if the revision,
languagemodel and dictionary files are readable.
Returns:
True if this vocabulary has been compiled, else False
|
is_compiled
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
def _compile_vocabulary(self, phrases):
"""
Compiles the vocabulary to the Pocketsphinx format by creating a
languagemodel and a dictionary.
Arguments:
phrases -- a list of phrases that this vocabulary will contain
"""
text = " ".join([("<s> %s </s>" % phrase) for phrase in phrases])
self._logger.debug('Compiling languagemodel...')
vocabulary = self._compile_languagemodel(text, self.languagemodel_file)
self._logger.debug('Starting dictionary...')
self._compile_dictionary(vocabulary, self.dictionary_file)
|
Compiles the vocabulary to the Pocketsphinx format by creating a
languagemodel and a dictionary.
Arguments:
phrases -- a list of phrases that this vocabulary will contain
|
_compile_vocabulary
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
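For reference, the transcript handed to the language-model tools wraps each phrase in sentence markers; a quick stand-alone illustration of the `text` string built above (the phrases are made up):

```python
phrases = ["WHAT IS THE WEATHER", "TELL ME A JOKE"]
text = " ".join([("<s> %s </s>" % phrase) for phrase in phrases])
print(text)
# <s> WHAT IS THE WEATHER </s> <s> TELL ME A JOKE </s>
```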
def _compile_languagemodel(self, text, output_file):
"""
Compiles the languagemodel from a text.
Arguments:
text -- the text the languagemodel will be generated from
output_file -- the path of the file this languagemodel will
be written to
Returns:
A list of all unique words this vocabulary contains.
"""
with tempfile.NamedTemporaryFile(suffix='.vocab', delete=False) as f:
vocab_file = f.name
# Create vocab file from text
self._logger.debug("Creating vocab file: '%s'", vocab_file)
cmuclmtk.text2vocab(text, vocab_file)
# Create language model from text
self._logger.debug("Creating languagemodel file: '%s'", output_file)
cmuclmtk.text2lm(text, output_file, vocab_file=vocab_file)
# Get words from vocab file
self._logger.debug("Getting words from vocab file and removing it " +
"afterwards...")
words = []
with open(vocab_file, 'r') as f:
for line in f:
line = line.strip()
if not line.startswith('#') and line not in ('<s>', '</s>'):
words.append(line)
os.remove(vocab_file)
return words
|
Compiles the languagemodel from a text.
Arguments:
text -- the text the languagemodel will be generated from
output_file -- the path of the file this languagemodel will
be written to
Returns:
A list of all unique words this vocabulary contains.
|
_compile_languagemodel
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
def _compile_dictionary(self, words, output_file):
"""
Compiles the dictionary from a list of words.
Arguments:
words -- a list of all unique words this vocabulary contains
output_file -- the path of the file this dictionary will
be written to
"""
# create the dictionary
self._logger.debug("Getting phonemes for %d words...", len(words))
g2pconverter = PhonetisaurusG2P(**PhonetisaurusG2P.get_config())
phonemes = g2pconverter.translate(words)
self._logger.debug("Creating dict file: '%s'", output_file)
with open(output_file, "w") as f:
for word, pronunciations in phonemes.items():
for i, pronunciation in enumerate(pronunciations, start=1):
if i == 1:
line = "%s\t%s\n" % (word, pronunciation)
else:
line = "%s(%d)\t%s\n" % (word, i, pronunciation)
f.write(line)
|
Compiles the dictionary from a list of words.
Arguments:
words -- a list of all unique words this vocabulary contains
output_file -- the path of the file this dictionary will
be written to
|
_compile_dictionary
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
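The dictionary writer above emits one line per pronunciation and numbers alternates from the second entry onward. A small stand-alone sketch of the same formatting loop with made-up phoneme data (real phonemes would come from the Phonetisaurus G2P step):

```python
# Illustrative g2p output: word -> list of pronunciations.
phonemes = {
    "TIME": ["T AY M"],
    "READ": ["R IY D", "R EH D"],  # two pronunciations -> READ and READ(2)
}

lines = []
for word, pronunciations in phonemes.items():
    for i, pronunciation in enumerate(pronunciations, start=1):
        if i == 1:
            lines.append("%s\t%s" % (word, pronunciation))
        else:
            lines.append("%s(%d)\t%s" % (word, i, pronunciation))

print("\n".join(lines))
# TIME     T AY M
# READ     R IY D
# READ(2)  R EH D
```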
def get_keyword_phrases():
"""
Gets the keyword phrases from the keywords file in the jasper data dir.
Returns:
A list of keyword phrases.
"""
phrases = []
with open(jasperpath.data('keyword_phrases'), mode="r") as f:
for line in f:
phrase = line.strip()
if phrase:
phrases.append(phrase)
return phrases
|
Gets the keyword phrases from the keywords file in the jasper data dir.
Returns:
A list of keyword phrases.
|
get_keyword_phrases
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
def get_all_phrases():
"""
Gets phrases from all modules.
Returns:
A list of phrases from all modules.
"""
phrases = []
modules = brain.Brain.get_modules()
for module in modules:
phrases.extend(get_phrases_from_module(module))
return sorted(list(set(phrases)))
|
Gets phrases from all modules.
Returns:
A list of phrases from all modules.
|
get_all_phrases
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, by listing the user's
Facebook friends with birthdays today.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
oauth_access_token = profile['keys']["FB_TOKEN"]
graph = facebook.GraphAPI(oauth_access_token)
try:
results = graph.request("me/friends",
args={'fields': 'id,name,birthday'})
except facebook.GraphAPIError:
mic.say("I have not been authorized to query your Facebook. If you " +
"would like to check birthdays in the future, please visit " +
"the Jasper dashboard.")
return
except:
mic.say(
"I apologize, there's a problem with that service at the moment.")
return
needle = datetime.datetime.now(tz=getTimezone(profile)).strftime("%m/%d")
people = []
for person in results['data']:
try:
if needle in person['birthday']:
people.append(person['name'])
except:
continue
if len(people) > 0:
if len(people) == 1:
output = people[0] + " has a birthday today."
else:
output = "Your friends with birthdays today are " + \
", ".join(people[:-1]) + " and " + people[-1] + "."
else:
output = "None of your friends have birthdays today."
mic.say(output)
|
Responds to user-input, typically speech text, by listing the user's
Facebook friends with birthdays today.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Birthday.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Birthday.py
|
MIT
|
def getSender(email):
"""
Returns the best-guess sender of an email.
Arguments:
email -- the email whose sender is desired
Returns:
Sender of the email.
"""
sender = email['From']
m = re.match(r'(.*)\s<.*>', sender)
if m:
return m.group(1)
return sender
|
Returns the best-guess sender of an email.
Arguments:
email -- the email whose sender is desired
Returns:
Sender of the email.
|
getSender
|
python
|
jasperproject/jasper-client
|
client/modules/Gmail.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
|
MIT
|
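The sender regex in getSender keeps only the display name from a typical 'Display Name <user@example.com>' header and falls back to the raw value otherwise. A quick stand-alone check of that behaviour (the sample addresses are made up):

```python
import re


def best_guess_sender(sender):
    # Same pattern as getSender: keep the display name when one is present.
    m = re.match(r'(.*)\s<.*>', sender)
    return m.group(1) if m else sender


assert best_guess_sender("Ada Lovelace <ada@example.com>") == "Ada Lovelace"
assert best_guess_sender("ada@example.com") == "ada@example.com"
```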
def getMostRecentDate(emails):
"""
Returns the most recent date of any email in the list provided.
Arguments:
emails -- a list of emails to check
Returns:
Date of the most recent email.
"""
dates = [getDate(e) for e in emails]
dates.sort(reverse=True)
if dates:
return dates[0]
return None
|
Returns the most recent date of any email in the list provided.
Arguments:
emails -- a list of emails to check
Returns:
Date of the most recent email.
|
getMostRecentDate
|
python
|
jasperproject/jasper-client
|
client/modules/Gmail.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
|
MIT
|
def fetchUnreadEmails(profile, since=None, markRead=False, limit=None):
"""
Fetches a list of unread email objects from a user's Gmail inbox.
Arguments:
profile -- contains information related to the user (e.g., Gmail
address)
since -- if provided, no emails before this date will be returned
markRead -- if True, marks all returned emails as read in target inbox
limit -- if provided and the number of unread emails exceeds it, only
that count is returned
Returns:
A list of unread email objects, or the number of unread emails if it
exceeds limit.
"""
conn = imaplib.IMAP4_SSL('imap.gmail.com')
conn.debug = 0
conn.login(profile['gmail_address'], profile['gmail_password'])
conn.select(readonly=(not markRead))
msgs = []
(retcode, messages) = conn.search(None, '(UNSEEN)')
if retcode == 'OK' and messages != ['']:
numUnread = len(messages[0].split(' '))
if limit and numUnread > limit:
return numUnread
for num in messages[0].split(' '):
# parse email RFC822 format
ret, data = conn.fetch(num, '(RFC822)')
msg = email.message_from_string(data[0][1])
if not since or getDate(msg) > since:
msgs.append(msg)
conn.close()
conn.logout()
return msgs
|
Fetches a list of unread email objects from a user's Gmail inbox.
Arguments:
profile -- contains information related to the user (e.g., Gmail
address)
since -- if provided, no emails before this date will be returned
markRead -- if True, marks all returned emails as read in target inbox
limit -- if provided and the number of unread emails exceeds it, only
that count is returned
Returns:
A list of unread email objects, or the number of unread emails if it
exceeds limit.
|
fetchUnreadEmails
|
python
|
jasperproject/jasper-client
|
client/modules/Gmail.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a summary of
the user's Gmail inbox, reporting on the number of unread emails
in the inbox, as well as their senders.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., Gmail
address)
"""
try:
msgs = fetchUnreadEmails(profile, limit=5)
if isinstance(msgs, int):
response = "You have %d unread emails." % msgs
mic.say(response)
return
senders = [getSender(e) for e in msgs]
except imaplib.IMAP4.error:
mic.say(
"I'm sorry. I'm not authenticated to work with your Gmail.")
return
if not senders:
mic.say("You have no unread emails.")
elif len(senders) == 1:
mic.say("You have one unread email from " + senders[0] + ".")
else:
response = "You have %d unread emails" % len(
senders)
unique_senders = list(set(senders))
if len(unique_senders) > 1:
unique_senders[-1] = 'and ' + unique_senders[-1]
response += ". Senders include: "
response += '...'.join(unique_senders)
else:
response += " from " + unique_senders[0]
mic.say(response)
|
Responds to user-input, typically speech text, with a summary of
the user's Gmail inbox, reporting on the number of unread emails
in the inbox, as well as their senders.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., Gmail
address)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Gmail.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Gmail.py
|
MIT
|
def getTopStories(maxResults=None):
"""
Returns the top headlines from Hacker News.
Arguments:
maxResults -- if provided, returns a random sample of size maxResults
"""
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(URL, headers=hdr)
page = urllib2.urlopen(req).read()
soup = BeautifulSoup(page)
matches = soup.findAll('td', class_="title")
matches = [m.a for m in matches if m.a and m.text != u'More']
matches = [HNStory(m.text, m['href']) for m in matches]
if maxResults:
num_stories = min(maxResults, len(matches))
return random.sample(matches, num_stories)
return matches
|
Returns the top headlines from Hacker News.
Arguments:
maxResults -- if provided, returns a random sample of size maxResults
|
getTopStories
|
python
|
jasperproject/jasper-client
|
client/modules/HN.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/HN.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a sample of
Hacker News's top headlines, sending them to the user over email
if desired.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
mic.say("Pulling up some stories.")
stories = getTopStories(maxResults=3)
all_titles = '... '.join(str(idx + 1) + ") " +
story.title for idx, story in enumerate(stories))
def handleResponse(text):
def extractOrdinals(text):
output = []
service = NumberService()
for w in text.split():
if w in service.__ordinals__:
output.append(service.__ordinals__[w])
return [service.parse(w) for w in output]
chosen_articles = extractOrdinals(text)
send_all = not chosen_articles and app_utils.isPositive(text)
if send_all or chosen_articles:
mic.say("Sure, just give me a moment")
if profile['prefers_email']:
body = "<ul>"
def formatArticle(article):
tiny_url = app_utils.generateTinyURL(article.URL)
if profile['prefers_email']:
return "<li><a href=\'%s\'>%s</a></li>" % (tiny_url,
article.title)
else:
return article.title + " -- " + tiny_url
for idx, article in enumerate(stories):
if send_all or (idx + 1) in chosen_articles:
article_link = formatArticle(article)
if profile['prefers_email']:
body += article_link
else:
if not app_utils.emailUser(profile, SUBJECT="",
BODY=article_link):
mic.say("I'm having trouble sending you these " +
"articles. Please make sure that your " +
"phone number and carrier are correct " +
"on the dashboard.")
return
# if prefers email, we send once, at the end
if profile['prefers_email']:
body += "</ul>"
if not app_utils.emailUser(profile,
SUBJECT="From the Front Page of " +
"Hacker News",
BODY=body):
mic.say("I'm having trouble sending you these articles. " +
"Please make sure that your phone number and " +
"carrier are correct on the dashboard.")
return
mic.say("All done.")
else:
mic.say("OK I will not send any articles")
if not profile['prefers_email'] and profile['phone_number']:
mic.say("Here are some front-page articles. " +
all_titles + ". Would you like me to send you these? " +
"If so, which?")
handleResponse(mic.activeListen())
else:
mic.say("Here are some front-page articles. " + all_titles)
|
Responds to user-input, typically speech text, with a sample of
Hacker News's top headlines, sending them to the user over email
if desired.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/HN.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/HN.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, by telling a joke.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
joke = getRandomJoke()
mic.say("Knock knock")
def firstLine(text):
mic.say(joke[0])
def punchLine(text):
mic.say(joke[1])
firstLine(mic.activeListen())
punchLine(mic.activeListen())
|
Responds to user-input, typically speech text, by telling a joke.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Joke.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Joke.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, by relaying the
meaning of life.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
messages = ["It's 42, you idiot.",
"It's 42. How many times do I have to tell you?"]
message = random.choice(messages)
mic.say(message)
|
Responds to user-input, typically speech text, by relaying the
meaning of life.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Life.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Life.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, by starting MPD music playback mode.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
logger = logging.getLogger(__name__)
kwargs = {}
if 'mpdclient' in profile:
if 'server' in profile['mpdclient']:
kwargs['server'] = profile['mpdclient']['server']
if 'port' in profile['mpdclient']:
kwargs['port'] = int(profile['mpdclient']['port'])
logger.debug("Preparing to start music module")
try:
mpdwrapper = MPDWrapper(**kwargs)
except:
logger.error("Couldn't connect to MPD server", exc_info=True)
mic.say("I'm sorry. It seems that Spotify is not enabled. Please " +
"read the documentation to learn how to configure Spotify.")
return
mic.say("Please give me a moment, I'm loading your Spotify playlists.")
# FIXME: Make this configurable
persona = 'JASPER'
logger.debug("Starting music mode")
music_mode = MusicMode(persona, mic, mpdwrapper)
music_mode.handleForever()
logger.debug("Exiting music mode")
return
|
Responds to user-input, typically speech text, by starting MPD music playback mode.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def __init__(self, server="localhost", port=6600):
"""
Prepare the client and music variables
"""
self.server = server
self.port = port
# prepare client
self.client = mpd.MPDClient()
self.client.timeout = None
self.client.idletimeout = None
self.client.connect(self.server, self.port)
# gather playlists
self.playlists = [x["playlist"] for x in self.client.listplaylists()]
# gather songs
self.client.clear()
for playlist in self.playlists:
self.client.load(playlist)
self.songs = [] # may have duplicates
# capitalized strings
self.song_titles = []
self.song_artists = []
soup = self.client.playlist()
for i in range(0, len(soup) / 10):
index = i * 10
id = soup[index].strip()
title = soup[index + 3].strip().upper()
artist = soup[index + 2].strip().upper()
album = soup[index + 4].strip().upper()
self.songs.append(Song(id, title, artist, album))
self.song_titles.append(title)
self.song_artists.append(artist)
|
Prepare the client and music variables
|
__init__
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def play(self, songs=False, playlist_name=False):
"""
Plays the current song or accepts a song to play.
Arguments:
songs -- a list of song objects
playlist_name -- user-defined, something like "Love Song Playlist"
"""
if songs:
self.client.clear()
for song in songs:
try: # for some reason, certain ids don't work
self.client.add(song.id)
except:
pass
if playlist_name:
self.client.clear()
self.client.load(playlist_name)
self.client.play()
|
Plays the current song or accepts a song to play.
Arguments:
songs -- a list of song objects
playlist_name -- user-defined, something like "Love Song Playlist"
|
play
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def get_soup(self):
"""
Returns the list of unique words that comprise song and artist titles
"""
soup = []
for song in self.songs:
song_words = song.title.split(" ")
artist_words = song.artist.split(" ")
soup.extend(song_words)
soup.extend(artist_words)
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
else '_' for c in range(256))
soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate(
title_trans).replace("_", "") for x in soup]
soup = [x for x in soup if x != ""]
return list(set(soup))
|
Returns the list of unique words that comprise song and artist titles
|
get_soup
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def get_soup_playlist(self):
"""
Returns the list of unique words that comprise playlist names
"""
soup = []
for name in self.playlists:
soup.extend(name.split(" "))
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
else '_' for c in range(256))
soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate(
title_trans).replace("_", "") for x in soup]
soup = [x for x in soup if x != ""]
return list(set(soup))
|
Returns the list of unique words that comprise playlist names
|
get_soup_playlist
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def get_soup_separated(self):
"""
Returns the list of PHRASES that comprise song and artist titles
"""
title_soup = [song.title for song in self.songs]
artist_soup = [song.artist for song in self.songs]
soup = list(set(title_soup + artist_soup))
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
else '_' for c in range(256))
soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate(
title_trans).replace("_", " ") for x in soup]
soup = [re.sub(' +', ' ', x) for x in soup if x != ""]
return soup
|
Returns the list of PHRASES that comprise song and artist titles
|
get_soup_separated
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def fuzzy_songs(self, query):
"""
Returns the songs that best match the query on either the title
or the artist field.
"""
query = query.upper()
matched_song_titles = difflib.get_close_matches(query,
self.song_titles)
matched_song_artists = difflib.get_close_matches(query,
self.song_artists)
# if query is beautifully matched, then forget about everything else
strict_priority_title = [x for x in matched_song_titles if x == query]
strict_priority_artists = [
x for x in matched_song_artists if x == query]
if strict_priority_title:
matched_song_titles = strict_priority_title
if strict_priority_artists:
matched_song_artists = strict_priority_artists
matched_songs_bytitle = [
song for song in self.songs if song.title in matched_song_titles]
matched_songs_byartist = [
song for song in self.songs if song.artist in matched_song_artists]
matches = list(set(matched_songs_bytitle + matched_songs_byartist))
return matches
|
Returns the songs that best match the query on either the title
or the artist field.
|
fuzzy_songs
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
def fuzzy_playlists(self, query):
"""
Returns the playlist names that best match the query.
"""
query = query.upper()
lookup = {n.upper(): n for n in self.playlists}
results = [lookup[r] for r in difflib.get_close_matches(query, lookup)]
return results
|
Returns the playlist names that best match the query.
|
fuzzy_playlists
|
python
|
jasperproject/jasper-client
|
client/modules/MPDControl.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/MPDControl.py
|
MIT
|
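fuzzy_playlists combines difflib.get_close_matches with an uppercased lookup table so matching is case-insensitive while the original playlist names are returned. A minimal stand-alone sketch of that pattern (the playlist names are made up):

```python
import difflib

playlists = ["Love Song Playlist", "Road Trip", "Workout Mix"]

query = "love songs playlist".upper()
lookup = {name.upper(): name for name in playlists}
matches = [lookup[m] for m in difflib.get_close_matches(query, lookup)]
print(matches)  # ['Love Song Playlist']
```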
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a summary of
the day's top news headlines, sending them to the user over email
if desired.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
mic.say("Pulling up the news")
articles = getTopArticles(maxResults=3)
titles = [" ".join(x.title.split(" - ")[:-1]) for x in articles]
all_titles = "... ".join(str(idx + 1) + ")" +
title for idx, title in enumerate(titles))
def handleResponse(text):
def extractOrdinals(text):
output = []
service = NumberService()
for w in text.split():
if w in service.__ordinals__:
output.append(service.__ordinals__[w])
return [service.parse(w) for w in output]
chosen_articles = extractOrdinals(text)
send_all = not chosen_articles and app_utils.isPositive(text)
if send_all or chosen_articles:
mic.say("Sure, just give me a moment")
if profile['prefers_email']:
body = "<ul>"
def formatArticle(article):
tiny_url = app_utils.generateTinyURL(article.URL)
if profile['prefers_email']:
return "<li><a href=\'%s\'>%s</a></li>" % (tiny_url,
article.title)
else:
return article.title + " -- " + tiny_url
for idx, article in enumerate(articles):
if send_all or (idx + 1) in chosen_articles:
article_link = formatArticle(article)
if profile['prefers_email']:
body += article_link
else:
if not app_utils.emailUser(profile, SUBJECT="",
BODY=article_link):
mic.say("I'm having trouble sending you these " +
"articles. Please make sure that your " +
"phone number and carrier are correct " +
"on the dashboard.")
return
# if prefers email, we send once, at the end
if profile['prefers_email']:
body += "</ul>"
if not app_utils.emailUser(profile,
SUBJECT="Your Top Headlines",
BODY=body):
mic.say("I'm having trouble sending you these articles. " +
"Please make sure that your phone number and " +
"carrier are correct on the dashboard.")
return
mic.say("All set")
else:
mic.say("OK I will not send any articles")
if 'phone_number' in profile:
mic.say("Here are the current top headlines. " + all_titles +
". Would you like me to send you these articles? " +
"If so, which?")
handleResponse(mic.activeListen())
else:
mic.say(
"Here are the current top headlines. " + all_titles)
|
Responds to user-input, typically speech text, with a summary of
the day's top news headlines, sending them to the user over email
if desired.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/News.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/News.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a summary of
the user's Facebook notifications, including a count and details
related to each individual notification.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
oauth_access_token = profile['keys']['FB_TOKEN']
graph = facebook.GraphAPI(oauth_access_token)
try:
results = graph.request("me/notifications")
except facebook.GraphAPIError:
mic.say("I have not been authorized to query your Facebook. If you " +
"would like to check your notifications in the future, " +
"please visit the Jasper dashboard.")
return
except:
mic.say(
"I apologize, there's a problem with that service at the moment.")
return
if not len(results['data']):
mic.say("You have no Facebook notifications. ")
return
updates = []
for notification in results['data']:
updates.append(notification['title'])
count = len(results['data'])
mic.say("You have " + str(count) +
" Facebook notifications. " + " ".join(updates) + ". ")
return
|
Responds to user-input, typically speech text, with a summary of
the user's Facebook notifications, including a count and details
related to each individual notification.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Notifications.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Notifications.py
|
MIT
|
def handle(text, mic, profile):
"""
Reports the current time based on the user's timezone.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
tz = getTimezone(profile)
now = datetime.datetime.now(tz=tz)
service = DateService()
response = service.convertTime(now)
mic.say("It is %s right now." % response)
|
Reports the current time based on the user's timezone.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Time.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Time.py
|
MIT
|
def handle(text, mic, profile):
"""
Responds when the user's input is unclear or unusable.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
messages = ["I'm sorry, could you repeat that?",
"My apologies, could you try saying that again?",
"Say that again?", "I beg your pardon?"]
message = random.choice(messages)
mic.say(message)
|
Responds when the user's input is unclear or unusable.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Unclear.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Unclear.py
|
MIT
|
def replaceAcronyms(text):
"""
Replaces some commonly-used acronyms for an improved verbal weather report.
"""
def parseDirections(text):
words = {
'N': 'north',
'S': 'south',
'E': 'east',
'W': 'west',
}
output = [words[w] for w in list(text)]
return ' '.join(output)
acronyms = re.findall(r'\b([NESW]+)\b', text)
for w in acronyms:
text = text.replace(w, parseDirections(w))
text = re.sub(r'(\b\d+)F(\b)', '\g<1> Fahrenheit\g<2>', text)
text = re.sub(r'(\b)mph(\b)', '\g<1>miles per hour\g<2>', text)
text = re.sub(r'(\b)in\.', '\g<1>inches', text)
return text
|
Replaces some commonly-used acronyms for an improved verbal weather report.
|
replaceAcronyms
|
python
|
jasperproject/jasper-client
|
client/modules/Weather.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Weather.py
|
MIT
|
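To make the substitutions above concrete, here is the same sequence of regex steps applied to a sample forecast string (the sample text is made up; the output shows what replaceAcronyms would return for it):

```python
import re

DIRECTIONS = {'N': 'north', 'S': 'south', 'E': 'east', 'W': 'west'}

text = "NW winds around 10 mph with a high near 75F."

# Expand compass abbreviations first, then the unit shorthands,
# mirroring the order used by replaceAcronyms above.
for abbr in re.findall(r'\b([NESW]+)\b', text):
    text = text.replace(abbr, ' '.join(DIRECTIONS[c] for c in abbr))
text = re.sub(r'(\b\d+)F(\b)', r'\g<1> Fahrenheit\g<2>', text)
text = re.sub(r'(\b)mph(\b)', r'\g<1>miles per hour\g<2>', text)

print(text)
# north west winds around 10 miles per hour with a high near 75 Fahrenheit.
```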
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a summary of
the relevant weather for the requested date (typically, weather
information will not be available for days beyond tomorrow).
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
forecast = None
if 'wmo_id' in profile:
forecast = get_forecast_by_wmo_id(str(profile['wmo_id']))
elif 'location' in profile:
forecast = get_forecast_by_name(str(profile['location']))
if not forecast:
mic.say("I'm sorry, I can't seem to access that information. Please " +
"make sure that you've set your location on the dashboard.")
return
tz = getTimezone(profile)
service = DateService(tz=tz)
date = service.extractDay(text)
if not date:
date = datetime.datetime.now(tz=tz)
weekday = service.__daysOfWeek__[date.weekday()]
if date.weekday() == datetime.datetime.now(tz=tz).weekday():
date_keyword = "Today"
elif date.weekday() == (
datetime.datetime.now(tz=tz).weekday() + 1) % 7:
date_keyword = "Tomorrow"
else:
date_keyword = "On " + weekday
output = None
for entry in forecast:
try:
date_desc = entry['title'].split()[0].strip().lower()
if date_desc == 'forecast':
# For global forecasts
date_desc = entry['title'].split()[2].strip().lower()
weather_desc = entry['summary']
elif date_desc == 'current':
# For first item of global forecasts
continue
else:
# US forecasts
weather_desc = entry['summary'].split('-')[1]
if weekday == date_desc:
output = date_keyword + \
", the weather will be " + weather_desc + "."
break
except:
continue
if output:
output = replaceAcronyms(output)
mic.say(output)
else:
mic.say(
"I'm sorry. I can't see that far ahead.")
|
Responds to user-input, typically speech text, with a summary of
the relevant weather for the requested date (typically, weather
information will not be available for days beyond tomorrow).
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
handle
|
python
|
jasperproject/jasper-client
|
client/modules/Weather.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Weather.py
|
MIT
|
def isValid(text):
"""
Returns True if the text is related to the weather.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\b(weathers?|temperature|forecast|outside|hot|' +
r'cold|jacket|coat|rain)\b', text, re.IGNORECASE))
|
Returns True if the text is related to the weather.
Arguments:
text -- user-input, typically transcribed speech
|
isValid
|
python
|
jasperproject/jasper-client
|
client/modules/Weather.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/modules/Weather.py
|
MIT
|
def testLog(self):
"""Does Brain correctly log errors when raised by modules?"""
my_brain = TestBrain._emptyBrain()
unclear = my_brain.modules[-1]
with mock.patch.object(unclear, 'handle') as mocked_handle:
with mock.patch.object(my_brain._logger, 'error') as mocked_log:
mocked_handle.side_effect = KeyError('foo')
my_brain.query("zzz gibberish zzz")
self.assertTrue(mocked_log.called)
|
Does Brain correctly log errors when raised by modules?
|
testLog
|
python
|
jasperproject/jasper-client
|
tests/test_brain.py
|
https://github.com/jasperproject/jasper-client/blob/master/tests/test_brain.py
|
MIT
|
def testSortByPriority(self):
"""Does Brain sort modules by priority?"""
my_brain = TestBrain._emptyBrain()
priorities = filter(lambda m: hasattr(m, 'PRIORITY'), my_brain.modules)
target = sorted(priorities, key=lambda m: m.PRIORITY, reverse=True)
self.assertEqual(target, priorities)
|
Does Brain sort modules by priority?
|
testSortByPriority
|
python
|
jasperproject/jasper-client
|
tests/test_brain.py
|
https://github.com/jasperproject/jasper-client/blob/master/tests/test_brain.py
|
MIT
|
def testPriority(self):
"""Does Brain correctly send query to higher-priority module?"""
my_brain = TestBrain._emptyBrain()
hn_module = 'HN'
hn = filter(lambda m: m.__name__ == hn_module, my_brain.modules)[0]
with mock.patch.object(hn, 'handle') as mocked_handle:
my_brain.query(["hacker news"])
self.assertTrue(mocked_handle.called)
|
Does Brain correctly send query to higher-priority module?
|
testPriority
|
python
|
jasperproject/jasper-client
|
tests/test_brain.py
|
https://github.com/jasperproject/jasper-client/blob/master/tests/test_brain.py
|
MIT
|
def runConversation(self, query, inputs, module):
"""Generic method for spoofing conversation.
Arguments:
query -- The initial input to the server.
inputs -- Additional input, if conversation is extended.
Returns:
The server's responses, in a list.
"""
self.assertTrue(module.isValid(query))
mic = test_mic.Mic(inputs)
module.handle(query, mic, self.profile)
return mic.outputs
|
Generic method for spoofing conversation.
Arguments:
query -- The initial input to the server.
inputs -- Additional input, if conversation is extended.
Returns:
The server's responses, in a list.
|
runConversation
|
python
|
jasperproject/jasper-client
|
tests/test_modules.py
|
https://github.com/jasperproject/jasper-client/blob/master/tests/test_modules.py
|
MIT
|
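runConversation depends on tests.test_mic.Mic replaying scripted answers to activeListen and collecting everything a module says. A minimal sketch of such a test double, assuming that queue-in/collect-out behaviour (this class is illustrative, not the project's actual test_mic implementation):

```python
class FakeMic(object):
    """Test double: replays scripted inputs and records spoken outputs."""

    def __init__(self, inputs):
        self.inputs = list(inputs)   # answers returned by activeListen()
        self.outputs = []            # everything the module "said"

    def say(self, phrase):
        self.outputs.append(phrase)

    def activeListen(self):
        return self.inputs.pop(0) if self.inputs else ""


# Usage in the spirit of runConversation:
mic = FakeMic(["yes"])
mic.say("Would you like me to send you these articles?")
assert mic.activeListen() == "yes"
assert mic.outputs == ["Would you like me to send you these articles?"]
```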
def testTranscribeJasper(self):
"""
Does Jasper recognize his name (i.e., passive listen)?
"""
with open(self.jasper_clip, mode="rb") as f:
transcription = self.passive_stt_engine.transcribe(f)
self.assertIn("JASPER", transcription)
|
Does Jasper recognize his name (i.e., passive listen)?
|
testTranscribeJasper
|
python
|
jasperproject/jasper-client
|
tests/test_stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/tests/test_stt.py
|
MIT
|
def testTranscribe(self):
"""
Does Jasper recognize 'time' (i.e., active listen)?
"""
with open(self.time_clip, mode="rb") as f:
transcription = self.active_stt_engine.transcribe(f)
self.assertIn("TIME", transcription)
|
Does Jasper recognize 'time' (i.e., active listen)?
|
testTranscribe
|
python
|
jasperproject/jasper-client
|
tests/test_stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/tests/test_stt.py
|
MIT
|
def prepare_latents(
self,
batch_size: int, # Number of videos to generate in parallel
num_channels_latents: int, # Number of channels in the latents
width: int, # Width of the video frame
height: int, # Height of the video frame
video_length: int, # Length of the video in frames
dtype: torch.dtype, # Data type of the latents
device: torch.device, # Device to store the latents on
generator: Optional[torch.Generator] = None, # Random number generator for reproducibility
latents: Optional[torch.Tensor] = None # Pre-generated latents (optional)
):
"""
Prepares the initial latents for video generation.
Args:
batch_size (int): Number of videos to generate in parallel.
num_channels_latents (int): Number of channels in the latents.
width (int): Width of the video frame.
height (int): Height of the video frame.
video_length (int): Length of the video in frames.
dtype (torch.dtype): Data type of the latents.
device (torch.device): Device to store the latents on.
generator (Optional[torch.Generator]): Random number generator for reproducibility.
latents (Optional[torch.Tensor]): Pre-generated latents (optional).
Returns:
latents (torch.Tensor): Tensor of shape (batch_size, num_channels_latents,
video_length, height // vae_scale_factor, width // vae_scale_factor)
containing the initial latents for video generation.
"""
shape = (
batch_size,
num_channels_latents,
video_length,
height // self.vae_scale_factor,
width // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(
shape, generator=generator, device=device, dtype=dtype
)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
|
Prepares the initial latents for video generation.
Args:
batch_size (int): Number of videos to generate in parallel.
num_channels_latents (int): Number of channels in the latents.
width (int): Width of the video frame.
height (int): Height of the video frame.
video_length (int): Length of the video in frames.
dtype (torch.dtype): Data type of the latents.
device (torch.device): Device to store the latents on.
generator (Optional[torch.Generator]): Random number generator for reproducibility.
latents (Optional[torch.Tensor]): Pre-generated latents (optional).
Returns:
latents (torch.Tensor): Tensor of shape (batch_size, num_channels_latents,
video_length, height // vae_scale_factor, width // vae_scale_factor)
containing the initial latents for video generation.
|
prepare_latents
|
python
|
jdh-algo/JoyHallo
|
joyhallo/animate/face_animate.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate.py
|
MIT
|
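The shape computed in prepare_latents keeps the frame count but divides the frame size by the VAE scale factor. A quick worked example, assuming the common scale factor of 8 (all numbers here are illustrative):

```python
# Illustrative values; the real ones come from the pipeline configuration.
batch_size, num_channels_latents, video_length = 1, 4, 16
width, height, vae_scale_factor = 512, 512, 8

shape = (
    batch_size,
    num_channels_latents,
    video_length,
    height // vae_scale_factor,
    width // vae_scale_factor,
)
print(shape)  # (1, 4, 16, 64, 64)
```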
def decode_latents(self, latents):
"""
Decode the latents to produce a video.
Parameters:
latents (torch.Tensor): The latents to be decoded.
Returns:
video (torch.Tensor): The decoded video.
video_length (int): The length of the video in frames.
"""
video_length = latents.shape[2]
latents = 1 / 0.18215 * latents
latents = rearrange(latents, "b c f h w -> (b f) c h w")
# video = self.vae.decode(latents).sample
video = []
for frame_idx in tqdm(range(latents.shape[0])):
video.append(self.vae.decode(
latents[frame_idx: frame_idx + 1]).sample)
video = torch.cat(video)
video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
video = (video / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
video = video.cpu().float().numpy()
return video
|
Decode the latents to produce a video.
Parameters:
latents (torch.Tensor): The latents to be decoded.
Returns:
video (torch.Tensor): The decoded video.
video_length (int): The length of the video in frames.
|
decode_latents
|
python
|
jdh-algo/JoyHallo
|
joyhallo/animate/face_animate.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate.py
|
MIT
|
def enable_sequential_cpu_offload(self, gpu_id=0):
"""
Sequentially offloads selected models to the CPU, loading each onto the given GPU only when it is needed, to reduce GPU memory usage.
Args:
gpu_id (int, optional): The ID of the GPU to offload models to. Defaults to 0.
"""
device = torch.device(f"cuda:{gpu_id}")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model, device)
|
Sequentially offloads selected models to the CPU, loading each onto the given GPU only when it is needed, to reduce GPU memory usage.
Args:
gpu_id (int, optional): The ID of the GPU to offload models to. Defaults to 0.
|
enable_sequential_cpu_offload
|
python
|
jdh-algo/JoyHallo
|
joyhallo/animate/face_animate_static.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
|
MIT
|
def decode_latents(self, latents):
"""
Decode the given latents to video frames.
Parameters:
latents (torch.Tensor): The latents to be decoded. Shape: (batch_size, num_channels_latents, video_length, height, width).
Returns:
video (torch.Tensor): The decoded video frames. Shape: (batch_size, num_channels_latents, video_length, height, width).
"""
video_length = latents.shape[2]
latents = 1 / 0.18215 * latents
latents = rearrange(latents, "b c f h w -> (b f) c h w")
# video = self.vae.decode(latents).sample
video = []
for frame_idx in tqdm(range(latents.shape[0])):
video.append(self.vae.decode(
latents[frame_idx: frame_idx + 1]).sample)
video = torch.cat(video)
video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
video = (video / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
video = video.cpu().float().numpy()
return video
|
Decode the given latents to video frames.
Parameters:
latents (torch.Tensor): The latents to be decoded. Shape: (batch_size, num_channels_latents, video_length, height, width).
Returns:
video (torch.Tensor): The decoded video frames. Shape: (batch_size, num_channels_latents, video_length, height, width).
|
decode_latents
|
python
|
jdh-algo/JoyHallo
|
joyhallo/animate/face_animate_static.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
|
MIT
|
def prepare_latents(
self,
batch_size,
num_channels_latents,
width,
height,
dtype,
device,
generator,
latents=None,
):
"""
Prepares the initial latents for the diffusion pipeline.
Args:
batch_size (int): The number of images to generate in one forward pass.
num_channels_latents (int): The number of channels in the latents tensor.
width (int): The width of the latents tensor.
height (int): The height of the latents tensor.
dtype (torch.dtype): The data type of the latents tensor.
device (torch.device): The device to place the latents tensor on.
generator (Optional[torch.Generator], optional): A random number generator
for reproducibility. Defaults to None.
latents (Optional[torch.Tensor], optional): Pre-computed latents to use as
initial conditions for the diffusion process. Defaults to None.
Returns:
torch.Tensor: The prepared latents tensor.
"""
shape = (
batch_size,
num_channels_latents,
height // self.vae_scale_factor,
width // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(
shape, generator=generator, device=device, dtype=dtype
)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
|
Prepares the initial latents for the diffusion pipeline.
Args:
batch_size (int): The number of images to generate in one forward pass.
num_channels_latents (int): The number of channels in the latents tensor.
width (int): The width of the latents tensor.
height (int): The height of the latents tensor.
dtype (torch.dtype): The data type of the latents tensor.
device (torch.device): The device to place the latents tensor on.
generator (Optional[torch.Generator], optional): A random number generator
for reproducibility. Defaults to None.
latents (Optional[torch.Tensor], optional): Pre-computed latents to use as
initial conditions for the diffusion process. Defaults to None.
Returns:
torch.Tensor: The prepared latents tensor.
|
prepare_latents
|
python
|
jdh-algo/JoyHallo
|
joyhallo/animate/face_animate_static.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
|
MIT
|
def prepare_condition(
self,
cond_image,
width,
height,
device,
dtype,
do_classififer_free_guidance=False,
):
"""
Prepares the condition for the face animation pipeline.
Args:
cond_image (torch.Tensor): The conditional image tensor.
width (int): The width of the output image.
height (int): The height of the output image.
device (torch.device): The device to run the pipeline on.
dtype (torch.dtype): The data type of the tensor.
do_classififer_free_guidance (bool, optional): Whether to use classifier-free guidance or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple of processed condition and mask tensors.
"""
image = self.cond_image_processor.preprocess(
cond_image, height=height, width=width
).to(dtype=torch.float32)
image = image.to(device=device, dtype=dtype)
if do_classififer_free_guidance:
image = torch.cat([image] * 2)
return image
|
Prepares the condition for the face animation pipeline.
Args:
cond_image (torch.Tensor): The conditional image tensor.
width (int): The width of the output image.
height (int): The height of the output image.
device (torch.device): The device to run the pipeline on.
dtype (torch.dtype): The data type of the tensor.
do_classififer_free_guidance (bool, optional): Whether to use classifier-free guidance or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple of processed condition and mask tensors.
|
prepare_condition
|
python
|
jdh-algo/JoyHallo
|
joyhallo/animate/face_animate_static.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/animate/face_animate_static.py
|
MIT
|
def preprocess(self, wav_file: str, clip_length: int=-1):
"""
Preprocess a WAV audio file by separating the vocals from the background and resampling it to a 16 kHz sample rate.
The separated vocal track is then converted into wav2vec2 embeddings for further processing or analysis.
Args:
wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
Raises:
RuntimeError: Raises an exception if the WAV file cannot be processed. This could be due to issues
such as file not found, unsupported file format, or errors during the audio processing steps.
Returns:
torch.tensor: Returns an audio embedding as a torch.tensor
"""
if self.audio_separator is not None:
# 1. separate vocals
# TODO: process in memory
outputs = self.audio_separator.separate(wav_file)
if len(outputs) <= 0:
raise RuntimeError("Audio separate failed.")
vocal_audio_file = outputs[0]
vocal_audio_name, _ = os.path.splitext(vocal_audio_file)
vocal_audio_file = os.path.join(self.audio_separator.output_dir, vocal_audio_file)
vocal_audio_file = resample_audio(vocal_audio_file, os.path.join(self.audio_separator.output_dir, f"{vocal_audio_name}-16k.wav"), self.sample_rate)
else:
vocal_audio_file=wav_file
# 2. extract wav2vec features
speech_array, sampling_rate = librosa.load(vocal_audio_file, sr=self.sample_rate)
audio_feature = np.squeeze(self.wav2vec_feature_extractor(speech_array, sampling_rate=sampling_rate).input_values)
seq_len = math.ceil(len(audio_feature) / self.sample_rate * self.fps)
audio_length = seq_len
audio_feature = torch.from_numpy(audio_feature).float().to(device=self.device)
if clip_length>0 and seq_len % clip_length != 0:
audio_feature = torch.nn.functional.pad(audio_feature, (0, (clip_length - seq_len % clip_length) * (self.sample_rate // self.fps)), 'constant', 0.0)
seq_len += clip_length - seq_len % clip_length
audio_feature = audio_feature.unsqueeze(0)
with torch.no_grad():
embeddings = self.audio_encoder(audio_feature, seq_len=seq_len, output_hidden_states=True)
assert len(embeddings) > 0, "Fail to extract audio embedding"
if self.only_last_features:
audio_emb = embeddings.last_hidden_state.squeeze()
else:
audio_emb = torch.stack(embeddings.hidden_states[1:], dim=1).squeeze(0)
audio_emb = rearrange(audio_emb, "b s d -> s b d")
audio_emb = audio_emb.cpu().detach()
return audio_emb, audio_length
|
Preprocess a WAV audio file by separating the vocals from the background and resampling it to a 16 kHz sample rate.
The separated vocal track is then converted into wav2vec2 embeddings for further processing or analysis.
Args:
wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
Raises:
RuntimeError: Raises an exception if the WAV file cannot be processed. This could be due to issues
such as file not found, unsupported file format, or errors during the audio processing steps.
Returns:
torch.tensor: Returns an audio embedding as a torch.tensor
|
preprocess
|
python
|
jdh-algo/JoyHallo
|
joyhallo/datasets/audio_processor.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/audio_processor.py
|
MIT
|
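The frame-count and padding arithmetic in preprocess maps raw 16 kHz samples to video frames and then rounds the sequence up to a whole number of clips. A small worked example with illustrative numbers (16 kHz audio, 25 fps, clips of 16 frames):

```python
import math

sample_rate, fps, clip_length = 16000, 25, 16
num_samples = 52000                                     # ~3.25 s of audio

seq_len = math.ceil(num_samples / sample_rate * fps)    # 82 frames
if clip_length > 0 and seq_len % clip_length != 0:
    pad_frames = clip_length - seq_len % clip_length    # 14 frames
    pad_samples = pad_frames * (sample_rate // fps)     # 8960 samples to pad
    seq_len += pad_frames

print(seq_len)  # 96 frames, i.e. 6 clips of 16
```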
def get_embedding(self, wav_file: str):
"""preprocess wav audio file convert to embeddings
Args:
wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
Returns:
torch.tensor: Returns an audio embedding as a torch.tensor
"""
speech_array, sampling_rate = librosa.load(
wav_file, sr=self.sample_rate)
assert sampling_rate == 16000, "The audio sample rate must be 16000"
audio_feature = np.squeeze(self.wav2vec_feature_extractor(
speech_array, sampling_rate=sampling_rate).input_values)
seq_len = math.ceil(len(audio_feature) / self.sample_rate * self.fps)
audio_feature = torch.from_numpy(
audio_feature).float().to(device=self.device)
audio_feature = audio_feature.unsqueeze(0)
with torch.no_grad():
embeddings = self.audio_encoder(
audio_feature, seq_len=seq_len, output_hidden_states=True)
assert len(embeddings) > 0, "Fail to extract audio embedding"
if self.only_last_features:
audio_emb = embeddings.last_hidden_state.squeeze()
else:
audio_emb = torch.stack(
embeddings.hidden_states[1:], dim=1).squeeze(0)
audio_emb = rearrange(audio_emb, "b s d -> s b d")
audio_emb = audio_emb.cpu().detach()
return audio_emb
|
preprocess wav audio file convert to embeddings
Args:
wav_file (str): The path to the WAV file to be processed. This file should be accessible and in WAV format.
Returns:
torch.tensor: Returns an audio embedding as a torch.tensor
|
get_embedding
|
python
|
jdh-algo/JoyHallo
|
joyhallo/datasets/audio_processor.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/audio_processor.py
|
MIT
|
def preprocess(self, source_image_path: str, cache_dir: str, face_region_ratio: float):
"""
Apply preprocessing to the source image to prepare for face analysis.
Parameters:
source_image_path (str): The path to the source image.
cache_dir (str): The directory to cache intermediate results.
face_region_ratio (float): Face region ratio passed to the mask generation step.
Returns:
A tuple of (reference image tensor, face mask, face embedding,
full/background masks, face masks, lip masks).
"""
source_image = Image.open(source_image_path)
ref_image_pil = source_image.convert("RGB")
# 1. image augmentation
pixel_values_ref_img = self._augmentation(ref_image_pil, self.pixel_transform)
# 2.1 detect face
faces = self.face_analysis.get(cv2.cvtColor(np.array(ref_image_pil.copy()), cv2.COLOR_RGB2BGR))
if not faces:
print("No faces detected in the image. Using the entire image as the face region.")
# Use the entire image as the face region
face = {
"bbox": [0, 0, ref_image_pil.width, ref_image_pil.height],
"embedding": np.zeros(512)
}
else:
# Sort faces by size and select the largest one
faces_sorted = sorted(faces, key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]), reverse=True)
face = faces_sorted[0] # Select the largest face
# 2.2 face embedding
face_emb = face["embedding"]
# 2.3 render face mask
get_mask(source_image_path, cache_dir, face_region_ratio)
file_name = os.path.basename(source_image_path).split(".")[0]
face_mask_pil = Image.open(
os.path.join(cache_dir, f"{file_name}_face_mask.png")).convert("RGB")
face_mask = self._augmentation(face_mask_pil, self.cond_transform)
# 2.4 detect and expand lip, face mask
sep_background_mask = Image.open(
os.path.join(cache_dir, f"{file_name}_sep_background.png"))
sep_face_mask = Image.open(
os.path.join(cache_dir, f"{file_name}_sep_face.png"))
sep_lip_mask = Image.open(
os.path.join(cache_dir, f"{file_name}_sep_lip.png"))
pixel_values_face_mask = [
self._augmentation(sep_face_mask, self.attn_transform_64),
self._augmentation(sep_face_mask, self.attn_transform_32),
self._augmentation(sep_face_mask, self.attn_transform_16),
self._augmentation(sep_face_mask, self.attn_transform_8),
]
pixel_values_lip_mask = [
self._augmentation(sep_lip_mask, self.attn_transform_64),
self._augmentation(sep_lip_mask, self.attn_transform_32),
self._augmentation(sep_lip_mask, self.attn_transform_16),
self._augmentation(sep_lip_mask, self.attn_transform_8),
]
pixel_values_full_mask = [
self._augmentation(sep_background_mask, self.attn_transform_64),
self._augmentation(sep_background_mask, self.attn_transform_32),
self._augmentation(sep_background_mask, self.attn_transform_16),
self._augmentation(sep_background_mask, self.attn_transform_8),
]
pixel_values_full_mask = [mask.view(1, -1)
for mask in pixel_values_full_mask]
pixel_values_face_mask = [mask.view(1, -1)
for mask in pixel_values_face_mask]
pixel_values_lip_mask = [mask.view(1, -1)
for mask in pixel_values_lip_mask]
return pixel_values_ref_img, face_mask, face_emb, pixel_values_full_mask, pixel_values_face_mask, pixel_values_lip_mask
|
Apply preprocessing to the source image to prepare for face analysis.
Parameters:
source_image_path (str): The path to the source image.
cache_dir (str): The directory to cache intermediate results.
face_region_ratio (float): The face region ratio passed to mask generation.
Returns:
tuple: The augmented reference image, the face mask, the face embedding, and the multi-resolution background, face, and lip masks.
|
preprocess
|
python
|
jdh-algo/JoyHallo
|
joyhallo/datasets/image_processor.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/image_processor.py
|
MIT
|
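The separated masks are resized to four attention resolutions (64, 32, 16, and 8 in the transforms above) and each resized mask is flattened with view(1, -1) so it can later be broadcast over attention tokens. A rough sketch of that idea using plain interpolation in place of the repository's attn_transform_* pipelines (those transforms are an assumption here):

import torch
import torch.nn.functional as F

mask = (torch.rand(1, 1, 512, 512) > 0.5).float()   # fake binary face mask

resolutions = [64, 32, 16, 8]
mask_levels = [
    F.interpolate(mask, size=(r, r), mode="nearest").view(1, -1)   # (1, r*r) per level
    for r in resolutions
]
print([m.shape for m in mask_levels])   # [(1, 4096), (1, 1024), (1, 256), (1, 64)]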
def close(self):
"""
Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.
Args:
self: The ImageProcessor instance.
Returns:
None.
"""
for _, model in self.face_analysis.models.items():
if hasattr(model, "Dispose"):
model.Dispose()
|
Closes the ImageProcessor and releases any resources held by the FaceAnalysis instance.
Args:
self: The ImageProcessor instance.
Returns:
None.
|
close
|
python
|
jdh-algo/JoyHallo
|
joyhallo/datasets/image_processor.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/image_processor.py
|
MIT
|
def augmentation(self, image, transform, state=None):
"""
Apply data augmentation to the input image.
Args:
image (PIL.Image): The input image.
transform (torchvision.transforms.Compose): The data augmentation transforms.
state (dict, optional): The random state for reproducibility. Defaults to None.
Returns:
PIL.Image: The augmented image.
"""
if state is not None:
torch.set_rng_state(state)
return transform(image)
|
Apply data augmentation to the input image.
Args:
image (PIL.Image): The input image.
transform (torchvision.transforms.Compose): The data augmentation transforms.
state (dict, optional): The random state for reproducibility. Defaults to None.
Returns:
PIL.Image: The augmented image.
|
augmentation
|
python
|
jdh-algo/JoyHallo
|
joyhallo/datasets/mask_image.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/mask_image.py
|
MIT
|
def augmentation(self, images, transform, state=None):
"""
Apply the given transformation to the input images.
Args:
images (List[PIL.Image] or PIL.Image): The input images to be transformed.
transform (torchvision.transforms.Compose): The transformation to be applied to the images.
state (torch.ByteTensor, optional): The state of the random number generator.
If provided, it will set the RNG state to this value before applying the transformation. Defaults to None.
Returns:
torch.Tensor: The transformed images as a tensor.
If the input was a list of images, the tensor will have shape (f, c, h, w),
where f is the number of images, c is the number of channels, h is the height, and w is the width.
If the input was a single image, the tensor will have shape (c, h, w),
where c is the number of channels, h is the height, and w is the width.
"""
if state is not None:
torch.set_rng_state(state)
if isinstance(images, List):
transformed_images = [transform(img) for img in images]
ret_tensor = torch.stack(transformed_images, dim=0) # (f, c, h, w)
else:
ret_tensor = transform(images) # (c, h, w)
return ret_tensor
|
Apply the given transformation to the input images.
Args:
images (List[PIL.Image] or PIL.Image): The input images to be transformed.
transform (torchvision.transforms.Compose): The transformation to be applied to the images.
state (torch.ByteTensor, optional): The state of the random number generator.
If provided, it will set the RNG state to this value before applying the transformation. Defaults to None.
Returns:
torch.Tensor: The transformed images as a tensor.
If the input was a list of images, the tensor will have shape (f, c, h, w),
where f is the number of images, c is the number of channels, h is the height, and w is the width.
If the input was a single image, the tensor will have shape (c, h, w),
where c is the number of channels, h is the height, and w is the width.
|
augmentation
|
python
|
jdh-algo/JoyHallo
|
joyhallo/datasets/talk_video.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/datasets/talk_video.py
|
MIT
|
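Passing the same RNG state into augmentation for an image and its masks makes any random transform (for example a random flip or crop) come out identically across the calls. A minimal illustration of that pattern, independent of the dataset classes above:

import torch
from PIL import Image
from torchvision import transforms

transform = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
])

image = Image.new("RGB", (64, 64))
mask = Image.new("RGB", (64, 64))

state = torch.get_rng_state()      # capture the RNG state once
torch.set_rng_state(state)
image_t = transform(image)         # this flip decision...
torch.set_rng_state(state)
mask_t = transform(mask)           # ...is replayed identically here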
def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
"""
Apply the Gated Self-Attention mechanism to the input tensor `x` and object tensor `objs`.
Args:
x (torch.Tensor): The input tensor.
objs (torch.Tensor): The object tensor.
Returns:
torch.Tensor: The output tensor after applying Gated Self-Attention.
"""
if not self.enabled:
return x
n_visual = x.shape[1]
objs = self.linear(objs)
x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))
return x
|
Apply the Gated Self-Attention mechanism to the input tensor `x` and object tensor `objs`.
Args:
x (torch.Tensor): The input tensor.
objs (torch.Tensor): The object tensor.
Returns:
torch.Tensor: The output tensor after applying Gated Self-Attention.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
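The gated self-attention update is a residual whose contribution is scaled by learnable gates passed through tanh; with the gates initialized to zero the block starts out as an identity mapping. A stripped-down sketch of that gating pattern (the layer shapes here are invented):

import torch
import torch.nn as nn

class GatedResidual(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.ff = nn.Linear(dim, dim)
        self.alpha = nn.Parameter(torch.tensor(0.0))   # zero-initialized gate

    def forward(self, x):
        # tanh(0) == 0, so the branch contributes nothing until alpha is learned
        return x + self.alpha.tanh() * self.ff(self.norm(x))

block = GatedResidual(32)
x = torch.randn(2, 10, 32)
assert torch.allclose(block(x), x)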
def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
"""
Sets the chunk size for feed-forward processing in the transformer block.
Args:
chunk_size (Optional[int]): The size of the chunks to process in feed-forward layers.
If None, chunking is disabled and the feed-forward layer processes the whole sequence at once.
dim (int, optional): The dimension along which to split the input tensor into chunks. Defaults to 0.
Returns:
None.
"""
self._chunk_size = chunk_size
self._chunk_dim = dim
|
Sets the chunk size for feed-forward processing in the transformer block.
Args:
chunk_size (Optional[int]): The size of the chunks to process in feed-forward layers.
If None, chunking is disabled and the feed-forward layer processes the whole sequence at once.
dim (int, optional): The dimension along which to split the input tensor into chunks. Defaults to 0.
Returns:
None.
|
set_chunk_feed_forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
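Setting a chunk size tells the block to split the tokens along dim and run the feed-forward network chunk by chunk, trading a little speed for lower peak memory. A hedged sketch of what chunked feed-forward amounts to (the helper below is illustrative, not the diffusers implementation):

import torch
import torch.nn as nn

def chunked_feed_forward(ff: nn.Module, x: torch.Tensor, chunk_size: int, dim: int = 1):
    # Apply ff to smaller slices along `dim` and re-assemble; the result equals ff(x).
    chunks = x.chunk(max(1, x.shape[dim] // chunk_size), dim=dim)
    return torch.cat([ff(c) for c in chunks], dim=dim)

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
x = torch.randn(2, 1024, 64)
assert torch.allclose(chunked_feed_forward(ff, x, chunk_size=256), ff(x), atol=1e-6)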
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
timestep: Optional[torch.LongTensor] = None,
cross_attention_kwargs: Dict[str, Any] = None,
class_labels: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
This function defines the forward pass of the BasicTransformerBlock.
Args:
self (BasicTransformerBlock):
An instance of the BasicTransformerBlock class.
hidden_states (torch.FloatTensor):
A tensor containing the hidden states.
attention_mask (Optional[torch.FloatTensor], optional):
A tensor containing the attention mask. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional):
A tensor containing the encoder hidden states. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional):
A tensor containing the encoder attention mask. Defaults to None.
timestep (Optional[torch.LongTensor], optional):
A tensor containing the timesteps. Defaults to None.
cross_attention_kwargs (Dict[str, Any], optional):
Additional cross-attention arguments. Defaults to None.
class_labels (Optional[torch.LongTensor], optional):
A tensor containing the class labels. Defaults to None.
Returns:
torch.FloatTensor:
A tensor containing the transformed hidden states.
"""
# Notice that normalization is always applied before the real computation in the following blocks.
# 0. Self-Attention
batch_size = hidden_states.shape[0]
gate_msa = None
scale_mlp = None
shift_mlp = None
gate_mlp = None
if self.use_ada_layer_norm:
norm_hidden_states = self.norm1(hidden_states, timestep)
elif self.use_ada_layer_norm_zero:
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
)
elif self.use_layer_norm:
norm_hidden_states = self.norm1(hidden_states)
elif self.use_ada_layer_norm_single:
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
self.scale_shift_table[None] +
timestep.reshape(batch_size, 6, -1)
).chunk(6, dim=1)
norm_hidden_states = self.norm1(hidden_states)
norm_hidden_states = norm_hidden_states * \
(1 + scale_msa) + shift_msa
norm_hidden_states = norm_hidden_states.squeeze(1)
else:
raise ValueError("Incorrect norm used")
if self.pos_embed is not None:
norm_hidden_states = self.pos_embed(norm_hidden_states)
# 1. Retrieve lora scale.
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
# 2. Prepare GLIGEN inputs
cross_attention_kwargs = (
cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
)
gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=(
encoder_hidden_states if self.only_cross_attention else None
),
attention_mask=attention_mask,
**cross_attention_kwargs,
)
if self.use_ada_layer_norm_zero:
attn_output = gate_msa.unsqueeze(1) * attn_output
elif self.use_ada_layer_norm_single:
attn_output = gate_msa * attn_output
hidden_states = attn_output + hidden_states
if hidden_states.ndim == 4:
hidden_states = hidden_states.squeeze(1)
# 2.5 GLIGEN Control
if gligen_kwargs is not None:
hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
# 3. Cross-Attention
if self.attn2 is not None:
if self.use_ada_layer_norm:
norm_hidden_states = self.norm2(hidden_states, timestep)
elif self.use_ada_layer_norm_zero or self.use_layer_norm:
norm_hidden_states = self.norm2(hidden_states)
elif self.use_ada_layer_norm_single:
# For PixArt norm2 isn't applied here:
# https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
norm_hidden_states = hidden_states
else:
raise ValueError("Incorrect norm")
if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
norm_hidden_states = self.pos_embed(norm_hidden_states)
attn_output = self.attn2(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
**cross_attention_kwargs,
)
hidden_states = attn_output + hidden_states
# 4. Feed-forward
if not self.use_ada_layer_norm_single:
norm_hidden_states = self.norm3(hidden_states)
if self.use_ada_layer_norm_zero:
norm_hidden_states = (
norm_hidden_states *
(1 + scale_mlp[:, None]) + shift_mlp[:, None]
)
if self.use_ada_layer_norm_single:
norm_hidden_states = self.norm2(hidden_states)
norm_hidden_states = norm_hidden_states * \
(1 + scale_mlp) + shift_mlp
ff_output = self.ff(norm_hidden_states, scale=lora_scale)
if self.use_ada_layer_norm_zero:
ff_output = gate_mlp.unsqueeze(1) * ff_output
elif self.use_ada_layer_norm_single:
ff_output = gate_mlp * ff_output
hidden_states = ff_output + hidden_states
if hidden_states.ndim == 4:
hidden_states = hidden_states.squeeze(1)
return hidden_states
|
This function defines the forward pass of the BasicTransformerBlock.
Args:
self (BasicTransformerBlock):
An instance of the BasicTransformerBlock class.
hidden_states (torch.FloatTensor):
A tensor containing the hidden states.
attention_mask (Optional[torch.FloatTensor], optional):
A tensor containing the attention mask. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional):
A tensor containing the encoder hidden states. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional):
A tensor containing the encoder attention mask. Defaults to None.
timestep (Optional[torch.LongTensor], optional):
A tensor containing the timesteps. Defaults to None.
cross_attention_kwargs (Dict[str, Any], optional):
Additional cross-attention arguments. Defaults to None.
class_labels (Optional[torch.LongTensor], optional):
A tensor containing the class labels. Defaults to None.
Returns:
torch.FloatTensor:
A tensor containing the transformed hidden states.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
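In the ada_layer_norm_zero branch, per-sample shift, scale, and gate tensors predicted from the timestep modulate the normalized activations and gate each residual. A compact sketch of that modulation with invented sizes, using a plain LayerNorm as a stand-in for the real AdaLayerNormZero module:

import torch
import torch.nn as nn

dim, batch = 64, 2
norm = nn.LayerNorm(dim, elementwise_affine=False)
hidden = torch.randn(batch, 100, dim)

# six modulation tensors, one set per sample (normally predicted from the timestep embedding)
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.randn(6, batch, dim)

x = norm(hidden) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # modulate before attention
attn_out = x                                                        # stand-in for self-attention
hidden = hidden + gate_msa[:, None] * attn_out                      # gated residual

x = norm(hidden) * (1 + scale_mlp[:, None]) + shift_mlp[:, None]   # modulate before feed-forward
hidden = hidden + gate_mlp[:, None] * x                             # gated residual again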
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout=0.0,
cross_attention_dim: Optional[int] = None,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
attention_bias: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
):
"""
The TemporalBasicTransformerBlock class is a PyTorch module that extends the BasicTransformerBlock to include temporal attention mechanisms.
This is particularly useful for video-related tasks, where the model needs to capture the temporal information within the sequence of frames.
The block consists of self-attention, cross-attention, feed-forward, and temporal attention mechanisms.
dim (int): The dimension of the input and output embeddings.
num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
attention_head_dim (int): The dimension of each attention head.
dropout (float, optional): The dropout probability for the attention scores. Defaults to 0.0.
cross_attention_dim (int, optional): The dimension of the cross-attention mechanism. Defaults to None.
activation_fn (str, optional): The activation function used in the feed-forward layer. Defaults to "geglu".
num_embeds_ada_norm (int, optional): The number of embeddings for adaptive normalization. Defaults to None.
attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
upcast_attention (bool, optional): If True, upcasts the attention mechanism for better performance. Defaults to False.
unet_use_cross_frame_attention (bool, optional): If True, uses cross-frame attention in the UNet model. Defaults to None.
unet_use_temporal_attention (bool, optional): If True, uses temporal attention in the UNet model. Defaults to None.
Forward method:
hidden_states (torch.FloatTensor): The input hidden states.
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
timestep (torch.LongTensor, optional): The current timestep for the transformer model. Defaults to None.
attention_mask (torch.FloatTensor, optional): The attention mask for the self-attention mechanism. Defaults to None.
video_length (int, optional): The length of the video sequence. Defaults to None.
Returns:
torch.FloatTensor: The output hidden states after passing through the TemporalBasicTransformerBlock.
"""
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm = num_embeds_ada_norm is not None
self.unet_use_cross_frame_attention = unet_use_cross_frame_attention
self.unet_use_temporal_attention = unet_use_temporal_attention
# SC-Attn
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
)
self.norm1 = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim)
)
# Cross-Attn
if cross_attention_dim is not None:
self.attn2 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
)
else:
self.attn2 = None
if cross_attention_dim is not None:
self.norm2 = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim)
)
else:
self.norm2 = None
# Feed-forward
self.ff = FeedForward(dim, dropout=dropout,
activation_fn=activation_fn)
self.norm3 = nn.LayerNorm(dim)
self.use_ada_layer_norm_zero = False
# Temp-Attn
# assert unet_use_temporal_attention is not None
if unet_use_temporal_attention is None:
unet_use_temporal_attention = False
if unet_use_temporal_attention:
self.attn_temp = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
)
nn.init.zeros_(self.attn_temp.to_out[0].weight.data)
self.norm_temp = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim)
)
|
The TemporalBasicTransformerBlock class is a PyTorch module that extends the BasicTransformerBlock to include temporal attention mechanisms.
This is particularly useful for video-related tasks, where the model needs to capture the temporal information within the sequence of frames.
The block consists of self-attention, cross-attention, feed-forward, and temporal attention mechanisms.
dim (int): The dimension of the input and output embeddings.
num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
attention_head_dim (int): The dimension of each attention head.
dropout (float, optional): The dropout probability for the attention scores. Defaults to 0.0.
cross_attention_dim (int, optional): The dimension of the cross-attention mechanism. Defaults to None.
activation_fn (str, optional): The activation function used in the feed-forward layer. Defaults to "geglu".
num_embeds_ada_norm (int, optional): The number of embeddings for adaptive normalization. Defaults to None.
attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
upcast_attention (bool, optional): If True, upcasts the attention mechanism for better performance. Defaults to False.
unet_use_cross_frame_attention (bool, optional): If True, uses cross-frame attention in the UNet model. Defaults to None.
unet_use_temporal_attention (bool, optional): If True, uses temporal attention in the UNet model. Defaults to None.
Forward method:
hidden_states (torch.FloatTensor): The input hidden states.
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
timestep (torch.LongTensor, optional): The current timestep for the transformer model. Defaults to None.
attention_mask (torch.FloatTensor, optional): The attention mask for the self-attention mechanism. Defaults to None.
video_length (int, optional): The length of the video sequence. Defaults to None.
Returns:
torch.FloatTensor: The output hidden states after passing through the TemporalBasicTransformerBlock.
|
__init__
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
def forward(
self,
hidden_states,
encoder_hidden_states=None,
timestep=None,
attention_mask=None,
video_length=None,
):
"""
Forward pass for the TemporalBasicTransformerBlock.
Args:
hidden_states (torch.FloatTensor): The input hidden states with shape (batch_size, seq_len, dim).
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states with shape (batch_size, src_seq_len, dim).
timestep (torch.LongTensor, optional): The timestep for the transformer block.
attention_mask (torch.FloatTensor, optional): The attention mask with shape (batch_size, seq_len, seq_len).
video_length (int, optional): The length of the video sequence.
Returns:
torch.FloatTensor: The output tensor after passing through the transformer block with shape (batch_size, seq_len, dim).
"""
norm_hidden_states = (
self.norm1(hidden_states, timestep)
if self.use_ada_layer_norm
else self.norm1(hidden_states)
)
if self.unet_use_cross_frame_attention:
hidden_states = (
self.attn1(
norm_hidden_states,
attention_mask=attention_mask,
video_length=video_length,
)
+ hidden_states
)
else:
hidden_states = (
self.attn1(norm_hidden_states, attention_mask=attention_mask)
+ hidden_states
)
if self.attn2 is not None:
# Cross-Attention
norm_hidden_states = (
self.norm2(hidden_states, timestep)
if self.use_ada_layer_norm
else self.norm2(hidden_states)
)
hidden_states = (
self.attn2(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
)
+ hidden_states
)
# Feed-forward
hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
# Temporal-Attention
if self.unet_use_temporal_attention:
d = hidden_states.shape[1]
hidden_states = rearrange(
hidden_states, "(b f) d c -> (b d) f c", f=video_length
)
norm_hidden_states = (
self.norm_temp(hidden_states, timestep)
if self.use_ada_layer_norm
else self.norm_temp(hidden_states)
)
hidden_states = self.attn_temp(norm_hidden_states) + hidden_states
hidden_states = rearrange(
hidden_states, "(b d) f c -> (b f) d c", d=d)
return hidden_states
|
Forward pass for the TemporalBasicTransformerBlock.
Args:
hidden_states (torch.FloatTensor): The input hidden states with shape (batch_size, seq_len, dim).
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states with shape (batch_size, src_seq_len, dim).
timestep (torch.LongTensor, optional): The timestep for the transformer block.
attention_mask (torch.FloatTensor, optional): The attention mask with shape (batch_size, seq_len, seq_len).
video_length (int, optional): The length of the video sequence.
Returns:
torch.FloatTensor: The output tensor after passing through the transformer block with shape (batch_size, seq_len, dim).
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
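The temporal-attention step folds the spatial positions into the batch axis so that attention runs across the f frames at each pixel location, and then folds them back. A tiny einops sketch of that round trip with made-up sizes:

import torch
from einops import rearrange

b, f, d, c = 2, 8, 16, 32            # batch, frames, spatial positions (HxW), channels
hidden = torch.randn(b * f, d, c)    # layout used inside the UNet: (b f) d c

temporal = rearrange(hidden, "(b f) d c -> (b d) f c", f=f)   # sequences of length f per position
# ... temporal self-attention would run here over the f axis ...
restored = rearrange(temporal, "(b d) f c -> (b f) d c", d=d)

assert torch.equal(restored, hidden)   # the fold/unfold is lossless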
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
dropout=0.0,
cross_attention_dim: Optional[int] = None,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
attention_bias: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
depth=0,
unet_block_name=None,
stack_enable_blocks_name: Optional[List[str]] = None,
stack_enable_blocks_depth: Optional[List[int]] = None,
):
"""
Initializes the AudioTemporalBasicTransformerBlock module.
Args:
dim (int): The dimension of the input and output embeddings.
num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
attention_head_dim (int): The dimension of each attention head.
dropout (float, optional): The dropout probability for the attention mechanism. Defaults to 0.0.
cross_attention_dim (Optional[int], optional): The dimension of the cross-attention mechanism. Defaults to None.
activation_fn (str, optional): The activation function to be used in the feed-forward network. Defaults to "geglu".
num_embeds_ada_norm (Optional[int], optional): The number of embeddings for adaptive normalization. Defaults to None.
attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
upcast_attention (bool, optional): If True, upcasts the attention mechanism to float32. Defaults to False.
unet_use_cross_frame_attention (Optional[bool], optional): If True, uses cross-frame attention in UNet. Defaults to None.
unet_use_temporal_attention (Optional[bool], optional): If True, uses temporal attention in UNet. Defaults to None.
depth (int, optional): The depth of the transformer block. Defaults to 0.
unet_block_name (Optional[str], optional): The name of the UNet block. Defaults to None.
stack_enable_blocks_name (Optional[List[str]], optional): The list of enabled blocks in the stack. Defaults to None.
stack_enable_blocks_depth (Optional[List[int]], optional): The list of depths for the enabled blocks in the stack. Defaults to None.
"""
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm = num_embeds_ada_norm is not None
self.unet_use_cross_frame_attention = unet_use_cross_frame_attention
self.unet_use_temporal_attention = unet_use_temporal_attention
self.unet_block_name = unet_block_name
self.depth = depth
zero_conv_full = nn.Conv2d(
dim, dim, kernel_size=1)
self.zero_conv_full = zero_module(zero_conv_full)
zero_conv_face = nn.Conv2d(
dim, dim, kernel_size=1)
self.zero_conv_face = zero_module(zero_conv_face)
zero_conv_lip = nn.Conv2d(
dim, dim, kernel_size=1)
self.zero_conv_lip = zero_module(zero_conv_lip)
# SC-Attn
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
)
self.norm1 = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim)
)
# Cross-Attn
if cross_attention_dim is not None:
if (stack_enable_blocks_name is not None and
stack_enable_blocks_depth is not None and
self.unet_block_name in stack_enable_blocks_name and
self.depth in stack_enable_blocks_depth):
self.attn2_0 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
)
self.attn2 = None
else:
self.attn2 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=attention_bias,
upcast_attention=upcast_attention,
)
self.attn2_0=None
else:
self.attn2 = None
self.attn2_0 = None
if cross_attention_dim is not None:
self.norm2 = (
AdaLayerNorm(dim, num_embeds_ada_norm)
if self.use_ada_layer_norm
else nn.LayerNorm(dim)
)
else:
self.norm2 = None
# Feed-forward
self.ff = FeedForward(dim, dropout=dropout,
activation_fn=activation_fn)
self.norm3 = nn.LayerNorm(dim)
self.use_ada_layer_norm_zero = False
|
Initializes the AudioTemporalBasicTransformerBlock module.
Args:
dim (int): The dimension of the input and output embeddings.
num_attention_heads (int): The number of attention heads in the multi-head self-attention mechanism.
attention_head_dim (int): The dimension of each attention head.
dropout (float, optional): The dropout probability for the attention mechanism. Defaults to 0.0.
cross_attention_dim (Optional[int], optional): The dimension of the cross-attention mechanism. Defaults to None.
activation_fn (str, optional): The activation function to be used in the feed-forward network. Defaults to "geglu".
num_embeds_ada_norm (Optional[int], optional): The number of embeddings for adaptive normalization. Defaults to None.
attention_bias (bool, optional): If True, uses bias in the attention mechanism. Defaults to False.
only_cross_attention (bool, optional): If True, only uses cross-attention. Defaults to False.
upcast_attention (bool, optional): If True, upcasts the attention mechanism to float32. Defaults to False.
unet_use_cross_frame_attention (Optional[bool], optional): If True, uses cross-frame attention in UNet. Defaults to None.
unet_use_temporal_attention (Optional[bool], optional): If True, uses temporal attention in UNet. Defaults to None.
depth (int, optional): The depth of the transformer block. Defaults to 0.
unet_block_name (Optional[str], optional): The name of the UNet block. Defaults to None.
stack_enable_blocks_name (Optional[List[str]], optional): The list of enabled blocks in the stack. Defaults to None.
stack_enable_blocks_depth (Optional[List[int]], optional): The list of depths for the enabled blocks in the stack. Defaults to None.
|
__init__
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
def forward(
self,
hidden_states,
encoder_hidden_states=None,
timestep=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
motion_scale=None,
video_length=None,
):
"""
Forward pass for the AudioTemporalBasicTransformerBlock.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
timestep (torch.LongTensor, optional): The timestep for the transformer block. Defaults to None.
attention_mask (torch.FloatTensor, optional): The attention mask. Defaults to None.
full_mask (torch.FloatTensor, optional): The full mask. Defaults to None.
face_mask (torch.FloatTensor, optional): The face mask. Defaults to None.
lip_mask (torch.FloatTensor, optional): The lip mask. Defaults to None.
motion_scale (list, optional): Weights applied to the full-, face-, and lip-region attention outputs. Defaults to None.
video_length (int, optional): The length of the video. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the AudioTemporalBasicTransformerBlock.
"""
norm_hidden_states = (
self.norm1(hidden_states, timestep)
if self.use_ada_layer_norm
else self.norm1(hidden_states)
)
if self.unet_use_cross_frame_attention:
hidden_states = (
self.attn1(
norm_hidden_states,
attention_mask=attention_mask,
video_length=video_length,
)
+ hidden_states
)
else:
hidden_states = (
self.attn1(norm_hidden_states, attention_mask=attention_mask)
+ hidden_states
)
if self.attn2 is not None:
# Cross-Attention
norm_hidden_states = (
self.norm2(hidden_states, timestep)
if self.use_ada_layer_norm
else self.norm2(hidden_states)
)
hidden_states = self.attn2(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
) + hidden_states
elif self.attn2_0 is not None:
norm_hidden_states = (
self.norm2(hidden_states, timestep)
if self.use_ada_layer_norm
else self.norm2(hidden_states)
)
level = self.depth
all_hidden_states = self.attn2_0(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
)
full_hidden_states = (
all_hidden_states * full_mask[level][:, :, None]
)
bz, sz, c = full_hidden_states.shape
sz_sqrt = int(sz ** 0.5)
full_hidden_states = full_hidden_states.reshape(
bz, sz_sqrt, sz_sqrt, c).permute(0, 3, 1, 2)
full_hidden_states = self.zero_conv_full(full_hidden_states).permute(0, 2, 3, 1).reshape(bz, -1, c)
face_hidden_state = (
all_hidden_states * face_mask[level][:, :, None]
)
face_hidden_state = face_hidden_state.reshape(
bz, sz_sqrt, sz_sqrt, c).permute(0, 3, 1, 2)
face_hidden_state = self.zero_conv_face(
face_hidden_state).permute(0, 2, 3, 1).reshape(bz, -1, c)
lip_hidden_state = (
all_hidden_states * lip_mask[level][:, :, None]
) # [32, 4096, 320]
lip_hidden_state = lip_hidden_state.reshape(
bz, sz_sqrt, sz_sqrt, c).permute(0, 3, 1, 2)
lip_hidden_state = self.zero_conv_lip(
lip_hidden_state).permute(0, 2, 3, 1).reshape(bz, -1, c)
if motion_scale is not None:
hidden_states = (
motion_scale[0] * full_hidden_states +
motion_scale[1] * face_hidden_state +
motion_scale[2] * lip_hidden_state + hidden_states
)
else:
hidden_states = (
full_hidden_states +
face_hidden_state +
lip_hidden_state + hidden_states
)
# Feed-forward
hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
return hidden_states
|
Forward pass for the AudioTemporalBasicTransformerBlock.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
encoder_hidden_states (torch.FloatTensor, optional): The encoder hidden states. Defaults to None.
timestep (torch.LongTensor, optional): The timestep for the transformer block. Defaults to None.
attention_mask (torch.FloatTensor, optional): The attention mask. Defaults to None.
full_mask (torch.FloatTensor, optional): The full mask. Defaults to None.
face_mask (torch.FloatTensor, optional): The face mask. Defaults to None.
lip_mask (torch.FloatTensor, optional): The lip mask. Defaults to None.
motion_scale (list, optional): Weights applied to the full-, face-, and lip-region attention outputs. Defaults to None.
video_length (int, optional): The length of the video. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the AudioTemporalBasicTransformerBlock.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
def zero_module(module):
"""
Zeroes out the parameters of a given module.
Args:
module (nn.Module): The module whose parameters need to be zeroed out.
Returns:
nn.Module: The same module with its parameters zeroed out.
"""
for p in module.parameters():
nn.init.zeros_(p)
return module
|
Zeroes out the parameters of a given module.
Args:
module (nn.Module): The module whose parameters need to be zeroed out.
Returns:
nn.Module: The same module with its parameters zeroed out.
|
zero_module
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/attention.py
|
MIT
|
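zero_module is the usual ControlNet-style trick: a freshly created layer is forced to output zeros, so a new branch (here, the zero_conv_full/face/lip convolutions) starts as a no-op and only gradually learns to contribute. A quick sketch of the effect, reusing the helper's logic:

import torch
import torch.nn as nn

def zero_module(module: nn.Module) -> nn.Module:
    for p in module.parameters():
        nn.init.zeros_(p)
    return module

conv = zero_module(nn.Conv2d(8, 8, kernel_size=1))
x = torch.randn(1, 8, 16, 16)
assert torch.all(conv(x) == 0)   # the branch is silent at initialization
# gradients still flow into conv's weights, so it can learn a non-zero contribution later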
def forward(self, audio_embeds):
"""
Defines the forward pass for the AudioProjModel.
Parameters:
audio_embeds (torch.Tensor): The input audio embeddings with shape (batch_size, video_length, blocks, channels).
Returns:
context_tokens (torch.Tensor): The output context tokens with shape (batch_size, video_length, context_tokens, output_dim).
"""
# merge
video_length = audio_embeds.shape[1]
audio_embeds = rearrange(audio_embeds, "bz f w b c -> (bz f) w b c")
batch_size, window_size, blocks, channels = audio_embeds.shape
audio_embeds = audio_embeds.view(batch_size, window_size * blocks * channels)
audio_embeds = torch.relu(self.proj1(audio_embeds))
audio_embeds = torch.relu(self.proj2(audio_embeds))
context_tokens = self.proj3(audio_embeds).reshape(
batch_size, self.context_tokens, self.output_dim
)
context_tokens = self.norm(context_tokens)
context_tokens = rearrange(
context_tokens, "(bz f) m c -> bz f m c", f=video_length
)
return context_tokens
|
Defines the forward pass for the AudioProjModel.
Parameters:
audio_embeds (torch.Tensor): The input audio embeddings with shape (batch_size, video_length, blocks, channels).
Returns:
context_tokens (torch.Tensor): The output context tokens with shape (batch_size, video_length, context_tokens, output_dim).
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/audio_proj.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/audio_proj.py
|
MIT
|
def forward(self, conditioning):
"""
Forward pass of the FaceLocator model.
Args:
conditioning (Tensor): The input conditioning tensor.
Returns:
Tensor: The output embedding tensor.
"""
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
|
Forward pass of the FaceLocator model.
Args:
conditioning (Tensor): The input conditioning tensor.
Returns:
Tensor: The output embedding tensor.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/face_locator.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/face_locator.py
|
MIT
|
def forward(self, image_embeds):
"""
Forward pass of the ImageProjModel, which takes in image embeddings and returns the
projected tokens after reshaping and normalization.
Args:
image_embeds (torch.Tensor): The input image embeddings, with shape
batch_size x num_image_tokens x clip_embeddings_dim.
Returns:
clip_extra_context_tokens (torch.Tensor): The projected tokens after reshaping
and normalization, with shape batch_size x (clip_extra_context_tokens *
cross_attention_dim).
"""
embeds = image_embeds
clip_extra_context_tokens = self.proj(embeds).reshape(
-1, self.clip_extra_context_tokens, self.cross_attention_dim
)
clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
return clip_extra_context_tokens
|
Forward pass of the ImageProjModel, which takes in image embeddings and returns the
projected tokens after reshaping and normalization.
Args:
image_embeds (torch.Tensor): The input image embeddings, with shape
batch_size x num_image_tokens x clip_embeddings_dim.
Returns:
clip_extra_context_tokens (torch.Tensor): The projected tokens after reshaping
and normalization, with shape batch_size x (clip_extra_context_tokens *
cross_attention_dim).
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/image_proj.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/image_proj.py
|
MIT
|
def zero_module(module):
"""
Zero out the parameters of a module and return it.
Args:
- module: A PyTorch module to zero out its parameters.
Returns:
A zeroed out PyTorch module.
"""
for p in module.parameters():
p.detach().zero_()
return module
|
Zero out the parameters of a module and return it.
Args:
- module: A PyTorch module to zero out its parameters.
Returns:
A zeroed out PyTorch module.
|
zero_module
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def get_motion_module(in_channels, motion_module_type: str, motion_module_kwargs: dict):
"""
This function returns a motion module based on the given type and parameters.
Args:
- in_channels (int): The number of input channels for the motion module.
- motion_module_type (str): The type of motion module to create. Currently, only "Vanilla" is supported.
- motion_module_kwargs (dict): Additional keyword arguments to pass to the motion module constructor.
Returns:
VanillaTemporalModule: The created motion module.
Raises:
ValueError: If an unsupported motion_module_type is provided.
"""
if motion_module_type == "Vanilla":
return VanillaTemporalModule(
in_channels=in_channels,
**motion_module_kwargs,
)
raise ValueError(f"Unsupported motion_module_type: {motion_module_type}")
|
This function returns a motion module based on the given type and parameters.
Args:
- in_channels (int): The number of input channels for the motion module.
- motion_module_type (str): The type of motion module to create. Currently, only "Vanilla" is supported.
- motion_module_kwargs (dict): Additional keyword arguments to pass to the motion module constructor.
Returns:
VanillaTemporalModule: The created motion module.
Raises:
ValueError: If an unsupported motion_module_type is provided.
|
get_motion_module
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def forward(
self,
input_tensor,
encoder_hidden_states,
attention_mask=None,
):
"""
Forward pass of the TemporalTransformer3DModel.
Args:
input_tensor (torch.Tensor): The input hidden states of the model.
encoder_hidden_states (torch.Tensor, optional): The hidden states of the encoder.
attention_mask (torch.Tensor, optional): The attention mask.
Returns:
torch.Tensor: The output tensor after the forward pass.
"""
hidden_states = input_tensor
hidden_states = self.temporal_transformer(
hidden_states, encoder_hidden_states
)
output = hidden_states
return output
|
Forward pass of the TemporalTransformer3DModel.
Args:
input_tensor (torch.Tensor): The input hidden states of the model.
encoder_hidden_states (torch.Tensor, optional): The hidden states of the encoder.
attention_mask (torch.Tensor, optional): The attention mask.
Returns:
torch.Tensor: The output tensor after the forward pass.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def forward(self, hidden_states, encoder_hidden_states=None):
"""
Forward pass for the TemporalTransformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states with shape (batch_size, sequence_length, in_channels).
encoder_hidden_states (torch.Tensor, optional): The encoder hidden states with shape (batch_size, encoder_sequence_length, in_channels).
Returns:
torch.Tensor: The output hidden states with shape (batch_size, sequence_length, in_channels).
"""
assert (
hidden_states.dim() == 5
), f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
batch, _, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * weight, inner_dim
)
hidden_states = self.proj_in(hidden_states)
# Transformer Blocks
for block in self.transformer_blocks:
hidden_states = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
video_length=video_length,
)
# output
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
return output
|
Forward pass for the TemporalTransformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states with shape (batch_size, sequence_length, in_channels).
encoder_hidden_states (torch.Tensor, optional): The encoder hidden states with shape (batch_size, encoder_sequence_length, in_channels).
Returns:
torch.Tensor: The output hidden states with shape (batch_size, sequence_length, in_channels).
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def forward(
self,
hidden_states,
encoder_hidden_states=None,
video_length=None,
):
"""
Forward pass for the TemporalTransformerBlock.
Args:
hidden_states (torch.Tensor): The input hidden states with shape
(batch_size, video_length, in_channels).
encoder_hidden_states (torch.Tensor, optional): The encoder hidden states
with shape (batch_size, encoder_length, in_channels).
video_length (int, optional): The length of the video.
Returns:
torch.Tensor: The output hidden states with shape
(batch_size, video_length, in_channels).
"""
for attention_block, norm in zip(self.attention_blocks, self.norms):
norm_hidden_states = norm(hidden_states)
hidden_states = (
attention_block(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states
if attention_block.is_cross_attention
else None,
video_length=video_length,
)
+ hidden_states
)
hidden_states = self.ff(self.ff_norm(hidden_states)) + hidden_states
output = hidden_states
return output
|
Forward pass for the TemporalTransformerBlock.
Args:
hidden_states (torch.Tensor): The input hidden states with shape
(batch_size, video_length, in_channels).
encoder_hidden_states (torch.Tensor, optional): The encoder hidden states
with shape (batch_size, encoder_length, in_channels).
video_length (int, optional): The length of the video.
Returns:
torch.Tensor: The output hidden states with shape
(batch_size, video_length, in_channels).
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def set_use_memory_efficient_attention_xformers(
self,
use_memory_efficient_attention_xformers: bool,
attention_op = None,
):
"""
Sets the use of memory-efficient attention xformers for the VersatileAttention class.
Args:
use_memory_efficient_attention_xformers (bool): A boolean flag indicating whether to use memory-efficient attention xformers or not.
Returns:
None
"""
if use_memory_efficient_attention_xformers:
if not is_xformers_available():
raise ModuleNotFoundError(
(
"Refer to https://github.com/facebookresearch/xformers for more information on how to install"
" xformers"
),
name="xformers",
)
if not torch.cuda.is_available():
raise ValueError(
"torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
" only available for GPU "
)
try:
# Make sure we can run the memory efficient attention
_ = xformers.ops.memory_efficient_attention(
torch.randn((1, 2, 40), device="cuda"),
torch.randn((1, 2, 40), device="cuda"),
torch.randn((1, 2, 40), device="cuda"),
)
except Exception as e:
raise e
processor = AttnProcessor()
else:
processor = AttnProcessor()
self.set_processor(processor)
|
Sets the use of memory-efficient attention xformers for the VersatileAttention class.
Args:
use_memory_efficient_attention_xformers (bool): A boolean flag indicating whether to use memory-efficient attention xformers or not.
Returns:
None
|
set_use_memory_efficient_attention_xformers
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def forward(
self,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
video_length=None,
**cross_attention_kwargs,
):
"""
Args:
hidden_states (`torch.Tensor`):
The hidden states to be passed through the model.
encoder_hidden_states (`torch.Tensor`, optional):
The encoder hidden states to be passed through the model.
attention_mask (`torch.Tensor`, optional):
The attention mask to be used in the model.
video_length (`int`, optional):
The length of the video.
cross_attention_kwargs (`dict`, optional):
Additional keyword arguments to be used for cross-attention.
Returns:
`torch.Tensor`:
The output tensor after passing through the model.
"""
if self.attention_mode == "Temporal":
d = hidden_states.shape[1] # d means HxW
hidden_states = rearrange(
hidden_states, "(b f) d c -> (b d) f c", f=video_length
)
if self.pos_encoder is not None:
hidden_states = self.pos_encoder(hidden_states)
encoder_hidden_states = (
repeat(encoder_hidden_states, "b n c -> (b d) n c", d=d)
if encoder_hidden_states is not None
else encoder_hidden_states
)
else:
raise NotImplementedError
hidden_states = self.processor(
self,
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
if self.attention_mode == "Temporal":
hidden_states = rearrange(
hidden_states, "(b d) f c -> (b f) d c", d=d)
return hidden_states
|
Args:
hidden_states (`torch.Tensor`):
The hidden states to be passed through the model.
encoder_hidden_states (`torch.Tensor`, optional):
The encoder hidden states to be passed through the model.
attention_mask (`torch.Tensor`, optional):
The attention mask to be used in the model.
video_length (`int`, optional):
The length of the video.
cross_attention_kwargs (`dict`, optional):
Additional keyword arguments to be used for cross-attention.
Returns:
`torch.Tensor`:
The output tensor after passing through the model.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/motion_module.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/motion_module.py
|
MIT
|
def torch_dfs(model: torch.nn.Module):
"""
Perform a depth-first search (DFS) traversal on a PyTorch model's neural network architecture.
This function recursively traverses all the children modules of a given PyTorch model and returns a list
containing all the modules in the model's architecture. The DFS approach starts with the input model and
explores its children modules depth-wise before backtracking and exploring other branches.
Args:
model (torch.nn.Module): The root module of the neural network to traverse.
Returns:
list: A list of all the modules in the model's architecture.
"""
result = [model]
for child in model.children():
result += torch_dfs(child)
return result
|
Perform a depth-first search (DFS) traversal on a PyTorch model's neural network architecture.
This function recursively traverses all the children modules of a given PyTorch model and returns a list
containing all the modules in the model's architecture. The DFS approach starts with the input model and
explores its children modules depth-wise before backtracking and exploring other branches.
Args:
model (torch.nn.Module): The root module of the neural network to traverse.
Returns:
list: A list of all the modules in the model's architecture.
|
torch_dfs
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/mutual_self_attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/mutual_self_attention.py
|
MIT
|
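torch_dfs produces essentially the same module list as nn.Module.modules() (for models without shared submodules), but as a plain Python list that the attention-control code can filter and re-sort. A short usage sketch on a toy model:

import torch.nn as nn

def torch_dfs(model: nn.Module):
    result = [model]
    for child in model.children():
        result += torch_dfs(child)
    return result

model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.ReLU(), nn.Linear(4, 2)))
modules = torch_dfs(model)

# pick out a particular layer type, the way the reference-attention code filters transformer blocks
linears = [m for m in modules if isinstance(m, nn.Linear)]
assert len(linears) == 2
assert list(model.modules()) == modules   # same modules, same DFS order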
def __init__(
self,
unet,
mode="write",
do_classifier_free_guidance=False,
attention_auto_machine_weight=float("inf"),
gn_auto_machine_weight=1.0,
style_fidelity=1.0,
reference_attn=True,
reference_adain=False,
fusion_blocks="midup",
batch_size=1,
) -> None:
"""
Initializes the ReferenceAttentionControl class.
Args:
unet (torch.nn.Module): The UNet model.
mode (str, optional): The mode of operation. Defaults to "write".
do_classifier_free_guidance (bool, optional): Whether to do classifier-free guidance. Defaults to False.
attention_auto_machine_weight (float, optional): The weight for attention auto-machine. Defaults to infinity.
gn_auto_machine_weight (float, optional): The weight for group-norm auto-machine. Defaults to 1.0.
style_fidelity (float, optional): The style fidelity. Defaults to 1.0.
reference_attn (bool, optional): Whether to use reference attention. Defaults to True.
reference_adain (bool, optional): Whether to use reference AdaIN. Defaults to False.
fusion_blocks (str, optional): The fusion blocks to use. Defaults to "midup".
batch_size (int, optional): The batch size. Defaults to 1.
Raises:
ValueError: If the mode is not recognized.
ValueError: If the fusion blocks are not recognized.
"""
# 10. Modify self attention and group norm
self.unet = unet
assert mode in ["read", "write"]
assert fusion_blocks in ["midup", "full"]
self.reference_attn = reference_attn
self.reference_adain = reference_adain
self.fusion_blocks = fusion_blocks
self.register_reference_hooks(
mode,
do_classifier_free_guidance,
attention_auto_machine_weight,
gn_auto_machine_weight,
style_fidelity,
reference_attn,
reference_adain,
fusion_blocks,
batch_size=batch_size,
)
|
Initializes the ReferenceAttentionControl class.
Args:
unet (torch.nn.Module): The UNet model.
mode (str, optional): The mode of operation. Defaults to "write".
do_classifier_free_guidance (bool, optional): Whether to do classifier-free guidance. Defaults to False.
attention_auto_machine_weight (float, optional): The weight for attention auto-machine. Defaults to infinity.
gn_auto_machine_weight (float, optional): The weight for group-norm auto-machine. Defaults to 1.0.
style_fidelity (float, optional): The style fidelity. Defaults to 1.0.
reference_attn (bool, optional): Whether to use reference attention. Defaults to True.
reference_adain (bool, optional): Whether to use reference AdaIN. Defaults to False.
fusion_blocks (str, optional): The fusion blocks to use. Defaults to "midup".
batch_size (int, optional): The batch size. Defaults to 1.
Raises:
ValueError: If the mode is not recognized.
ValueError: If the fusion blocks are not recognized.
|
__init__
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/mutual_self_attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/mutual_self_attention.py
|
MIT
|
def update(self, writer, dtype=torch.float16):
"""
Update the model's parameters.
Args:
writer (torch.nn.Module): The model's writer object.
dtype (torch.dtype, optional): The data type to be used for the update. Defaults to torch.float16.
Returns:
None.
"""
if self.reference_attn:
if self.fusion_blocks == "midup":
reader_attn_modules = [
module
for module in (
torch_dfs(self.unet.mid_block) +
torch_dfs(self.unet.up_blocks)
)
if isinstance(module, TemporalBasicTransformerBlock)
]
writer_attn_modules = [
module
for module in (
torch_dfs(writer.unet.mid_block)
+ torch_dfs(writer.unet.up_blocks)
)
if isinstance(module, BasicTransformerBlock)
]
elif self.fusion_blocks == "full":
reader_attn_modules = [
module
for module in torch_dfs(self.unet)
if isinstance(module, TemporalBasicTransformerBlock)
]
writer_attn_modules = [
module
for module in torch_dfs(writer.unet)
if isinstance(module, BasicTransformerBlock)
]
assert len(reader_attn_modules) == len(writer_attn_modules)
reader_attn_modules = sorted(
reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]
)
writer_attn_modules = sorted(
writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]
)
for r, w in zip(reader_attn_modules, writer_attn_modules):
r.bank = [v.clone().to(dtype) for v in w.bank]
|
Update the model's parameters.
Args:
writer (torch.nn.Module): The model's writer object.
dtype (torch.dtype, optional): The data type to be used for the update. Defaults to torch.float16.
Returns:
None.
|
update
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/mutual_self_attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/mutual_self_attention.py
|
MIT
|
def clear(self):
"""
Clears the attention bank of all reader attention modules.
This method is used when the `reference_attn` attribute is set to `True`.
It clears the attention bank of all reader attention modules inside the UNet
model based on the selected `fusion_blocks` mode.
If `fusion_blocks` is set to "midup", it searches for reader attention modules
in both the mid block and up blocks of the UNet model. If `fusion_blocks` is set
to "full", it searches for reader attention modules in the entire UNet model.
It sorts the reader attention modules by the number of neurons in their
`norm1.normalized_shape[0]` attribute in descending order. This sorting ensures
that the modules with more neurons are cleared first.
Finally, it iterates through the sorted list of reader attention modules and
calls the `clear()` method on each module's `bank` attribute to clear the
attention bank.
"""
if self.reference_attn:
if self.fusion_blocks == "midup":
reader_attn_modules = [
module
for module in (
torch_dfs(self.unet.mid_block) +
torch_dfs(self.unet.up_blocks)
)
if isinstance(module, (BasicTransformerBlock, TemporalBasicTransformerBlock))
]
elif self.fusion_blocks == "full":
reader_attn_modules = [
module
for module in torch_dfs(self.unet)
if isinstance(module, (BasicTransformerBlock, TemporalBasicTransformerBlock))
]
reader_attn_modules = sorted(
reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]
)
for r in reader_attn_modules:
r.bank.clear()
|
Clears the attention bank of all reader attention modules.
This method is used when the `reference_attn` attribute is set to `True`.
It clears the attention bank of all reader attention modules inside the UNet
model based on the selected `fusion_blocks` mode.
If `fusion_blocks` is set to "midup", it searches for reader attention modules
in both the mid block and up blocks of the UNet model. If `fusion_blocks` is set
to "full", it searches for reader attention modules in the entire UNet model.
It sorts the reader attention modules by the number of neurons in their
`norm1.normalized_shape[0]` attribute in descending order. This sorting ensures
that the modules with more neurons are cleared first.
Finally, it iterates through the sorted list of reader attention modules and
calls the `clear()` method on each module's `bank` attribute to clear the
attention bank.
|
clear
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/mutual_self_attention.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/mutual_self_attention.py
|
MIT
|
def forward(self, x):
"""
Forward pass of the InflatedConv3d layer.
Args:
x (torch.Tensor): Input tensor to the layer.
Returns:
torch.Tensor: Output tensor after applying the InflatedConv3d layer.
"""
video_length = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")
x = super().forward(x)
x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
return x
|
Forward pass of the InflatedConv3d layer.
Args:
x (torch.Tensor): Input tensor to the layer.
Returns:
torch.Tensor: Output tensor after applying the InflatedConv3d layer.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/resnet.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/resnet.py
|
MIT
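A minimal, self-contained sketch of the inflation trick used above: frames are folded into the batch axis so an ordinary 2D convolution runs per frame, then the video layout is restored. The shapes and the plain `nn.Conv2d` are chosen only for illustration.

import torch
import torch.nn as nn
from einops import rearrange

conv2d = nn.Conv2d(4, 8, kernel_size=3, padding=1)
x = torch.randn(2, 4, 16, 32, 32)             # (batch, channels, frames, height, width)
f = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")  # fold frames into the batch dimension
x = conv2d(x)                                 # 2D convolution applied to every frame
x = rearrange(x, "(b f) c h w -> b c f h w", f=f)
print(x.shape)                                # torch.Size([2, 8, 16, 32, 32])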
|
def forward(self, x):
"""
Performs a forward pass through the CustomClassName.
:param x: Input tensor of shape (batch_size, channels, video_length, height, width).
:return: Output tensor of shape (batch_size, channels, video_length, height, width).
"""
video_length = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")
x = super().forward(x)
x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
return x
|
Performs a forward pass through the CustomClassName.
:param x: Input tensor of shape (batch_size, channels, video_length, height, width).
:return: Output tensor of shape (batch_size, channels, video_length, height, width).
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/resnet.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/resnet.py
|
MIT
|
def forward(self, hidden_states, output_size=None):
"""
Forward pass of the Upsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be upsampled.
output_size (tuple, optional): Desired output size of the upsampled tensor.
Returns:
torch.Tensor: Upsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
"""
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
        # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(
hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest"
)
else:
hidden_states = F.interpolate(
hidden_states, size=output_size, mode="nearest"
)
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# if self.use_conv:
# if self.name == "conv":
# hidden_states = self.conv(hidden_states)
# else:
# hidden_states = self.Conv2d_0(hidden_states)
hidden_states = self.conv(hidden_states)
return hidden_states
|
Forward pass of the Upsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be upsampled.
output_size (tuple, optional): Desired output size of the upsampled tensor.
Returns:
torch.Tensor: Upsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/resnet.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/resnet.py
|
MIT
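The interpolation step can be exercised on its own; the snippet below is a standalone sketch of the nearest-neighbour upsampling with the bfloat16 round-trip, leaving out the trailing convolution.

import torch
import torch.nn.functional as F

hidden = torch.randn(1, 8, 4, 16, 16, dtype=torch.bfloat16)   # (b, c, f, h, w)
orig_dtype = hidden.dtype
if orig_dtype == torch.bfloat16:
    hidden = hidden.float()                   # nearest-neighbour kernel needs fp32
# keep the frame axis untouched, double height and width
hidden = F.interpolate(hidden, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
hidden = hidden.to(orig_dtype)
print(hidden.shape)                           # torch.Size([1, 8, 4, 32, 32])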
|
def __init__(
self, channels, use_conv=False, out_channels=None, padding=1, name="conv"
):
"""
Downsamples the given input in the 3D space.
Args:
channels: The number of input channels.
use_conv: Whether to use a convolutional layer for downsampling.
out_channels: The number of output channels. If None, the input channels are used.
padding: The amount of padding to be added to the input.
name: The name of the convolutional layer.
"""
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(
self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
raise NotImplementedError
|
Downsamples the given input in the 3D space.
Args:
channels: The number of input channels.
use_conv: Whether to use a convolutional layer for downsampling.
out_channels: The number of output channels. If None, the input channels are used.
padding: The amount of padding to be added to the input.
name: The name of the convolutional layer.
|
__init__
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/resnet.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/resnet.py
|
MIT
|
def forward(self, hidden_states):
"""
Forward pass for the Downsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be downsampled.
Returns:
torch.Tensor: Downsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
"""
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
|
Forward pass for the Downsample3D class.
Args:
hidden_states (torch.Tensor): Input tensor to be downsampled.
Returns:
torch.Tensor: Downsampled tensor.
Raises:
AssertionError: If the number of channels in the input tensor does not match the expected channels.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/resnet.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/resnet.py
|
MIT
|
def forward(self, input_tensor, temb):
"""
Forward pass for the ResnetBlock3D class.
Args:
input_tensor (torch.Tensor): Input tensor to the ResnetBlock3D layer.
            temb (torch.Tensor): Time embedding tensor.
Returns:
torch.Tensor: Output tensor after passing through the ResnetBlock3D layer.
"""
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
|
Forward pass for the ResnetBlock3D class.
Args:
input_tensor (torch.Tensor): Input tensor to the ResnetBlock3D layer.
    temb (torch.Tensor): Time embedding tensor.
Returns:
torch.Tensor: Output tensor after passing through the ResnetBlock3D layer.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/resnet.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/resnet.py
|
MIT
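The `scale_shift` branch is a FiLM-style modulation and can be checked in isolation. The toy shapes below are assumptions for the sketch; in the real block the projection to twice the channel count happens in `time_emb_proj`.

import torch

hidden = torch.randn(2, 64, 8, 16, 16)        # (b, c, f, h, w)
temb = torch.randn(2, 128)                    # assumed already projected to 2 * channels
temb = temb[:, :, None, None, None]           # broadcast over frames, height, width
scale, shift = torch.chunk(temb, 2, dim=1)    # each (2, 64, 1, 1, 1)
hidden = hidden * (1 + scale) + shift         # scale/shift modulation after norm2
print(hidden.shape)                           # torch.Size([2, 64, 8, 16, 16])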
|
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
timestep: Optional[torch.LongTensor] = None,
_added_cond_kwargs: Dict[str, torch.Tensor] = None,
class_labels: Optional[torch.LongTensor] = None,
cross_attention_kwargs: Dict[str, Any] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
):
"""
The [`Transformer2DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete,
`torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
`AdaLayerZeroNorm`.
cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
`tuple` where the first element is the sample tensor.
"""
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
# we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
# we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None and attention_mask.ndim == 2:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
encoder_attention_mask = (
1 - encoder_attention_mask.to(hidden_states.dtype)
) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# Retrieve lora scale.
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
# 1. Input
batch, _, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = (
self.proj_in(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_in(hidden_states)
)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * width, inner_dim
)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * width, inner_dim
)
hidden_states = (
self.proj_in(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_in(hidden_states)
)
# 2. Blocks
if self.caption_projection is not None:
batch_size = hidden_states.shape[0]
encoder_hidden_states = self.caption_projection(encoder_hidden_states)
encoder_hidden_states = encoder_hidden_states.view(
batch_size, -1, hidden_states.shape[-1]
)
ref_feature = hidden_states.reshape(batch, height, width, inner_dim)
for block in self.transformer_blocks:
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
timestep,
cross_attention_kwargs,
class_labels,
**ckpt_kwargs,
)
else:
hidden_states = block(
hidden_states, # shape [5, 4096, 320]
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states, # shape [1,4,768]
encoder_attention_mask=encoder_attention_mask,
timestep=timestep,
cross_attention_kwargs=cross_attention_kwargs,
class_labels=class_labels,
)
# 3. Output
output = None
if self.is_input_continuous:
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, width, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
hidden_states = (
self.proj_out(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_out(hidden_states)
)
else:
hidden_states = (
self.proj_out(hidden_states, scale=lora_scale)
if not USE_PEFT_BACKEND
else self.proj_out(hidden_states)
)
hidden_states = (
hidden_states.reshape(batch, height, width, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
output = hidden_states + residual
if not return_dict:
return (output, ref_feature)
return Transformer2DModelOutput(sample=output, ref_feature=ref_feature)
|
The [`Transformer2DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete,
`torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
`AdaLayerZeroNorm`.
cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
`tuple` where the first element is the sample tensor.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/transformer_2d.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/transformer_2d.py
|
MIT
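The 2D mask to additive-bias conversion at the top of this forward pass is easy to verify on its own; the snippet below reproduces just that transformation with a toy mask.

import torch

hidden_states = torch.randn(2, 4096, 320)
attention_mask = torch.tensor([[1, 1, 0, 0],
                               [1, 1, 1, 0]])               # 1 = keep, 0 = discard
if attention_mask.ndim == 2:
    bias = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
    bias = bias.unsqueeze(1)                                # (batch, 1, key_tokens)
print(bias.shape)                                           # torch.Size([2, 1, 4])
# kept positions become 0.0, discarded positions become -10000.0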
|
def forward(
self,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
motion_scale=None,
timestep=None,
return_dict: bool = True,
):
"""
Forward pass for the Transformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states.
encoder_hidden_states (torch.Tensor, optional): The input encoder hidden states.
attention_mask (torch.Tensor, optional): The attention mask.
full_mask (torch.Tensor, optional): The full mask.
face_mask (torch.Tensor, optional): The face mask.
lip_mask (torch.Tensor, optional): The lip mask.
timestep (int, optional): The current timestep.
return_dict (bool, optional): Whether to return a dictionary or a tuple.
Returns:
output (Union[Tuple, BaseOutput]): The output of the Transformer3DModel.
"""
# Input
assert (
hidden_states.dim() == 5
), f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
# TODO
if self.use_audio_module:
encoder_hidden_states = rearrange(
encoder_hidden_states,
"bs f margin dim -> (bs f) margin dim",
)
else:
if encoder_hidden_states.shape[0] != hidden_states.shape[0]:
encoder_hidden_states = repeat(
encoder_hidden_states, "b n c -> (b f) n c", f=video_length
)
batch, _, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * weight, inner_dim
)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(
batch, height * weight, inner_dim
)
hidden_states = self.proj_in(hidden_states)
# Blocks
motion_frames = []
for _, block in enumerate(self.transformer_blocks):
if isinstance(block, TemporalBasicTransformerBlock):
hidden_states, motion_frame_fea = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length,
)
motion_frames.append(motion_frame_fea)
else:
hidden_states = block(
hidden_states, # shape [2, 4096, 320]
encoder_hidden_states=encoder_hidden_states, # shape [2, 20, 640]
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
timestep=timestep,
video_length=video_length,
motion_scale=motion_scale,
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim)
.permute(0, 3, 1, 2)
.contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output, motion_frames)
return Transformer3DModelOutput(sample=output)
|
Forward pass for the Transformer3DModel.
Args:
hidden_states (torch.Tensor): The input hidden states.
encoder_hidden_states (torch.Tensor, optional): The input encoder hidden states.
attention_mask (torch.Tensor, optional): The attention mask.
full_mask (torch.Tensor, optional): The full mask.
face_mask (torch.Tensor, optional): The face mask.
lip_mask (torch.Tensor, optional): The lip mask.
timestep (int, optional): The current timestep.
return_dict (bool, optional): Whether to return a dictionary or a tuple.
Returns:
output (Union[Tuple, BaseOutput]): The output of the Transformer3DModel.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/transformer_3d.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/transformer_3d.py
|
MIT
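When the audio module is disabled, the conditioning states are tiled so every frame in the flattened `(b f)` batch sees the same embedding. A standalone sketch of that `einops.repeat` call, with made-up shapes:

import torch
from einops import repeat

video_length = 8
encoder_hidden_states = torch.randn(2, 77, 768)             # (b, tokens, dim)
encoder_hidden_states = repeat(
    encoder_hidden_states, "b n c -> (b f) n c", f=video_length
)
print(encoder_hidden_states.shape)                          # torch.Size([16, 77, 768])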
|
def get_down_block(
down_block_type: str,
num_layers: int,
in_channels: int,
out_channels: int,
temb_channels: int,
add_downsample: bool,
resnet_eps: float,
resnet_act_fn: str,
transformer_layers_per_block: int = 1,
num_attention_heads: Optional[int] = None,
resnet_groups: Optional[int] = None,
cross_attention_dim: Optional[int] = None,
downsample_padding: Optional[int] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
attention_type: str = "default",
attention_head_dim: Optional[int] = None,
dropout: float = 0.0,
):
""" This function creates and returns a UpBlock2D or CrossAttnUpBlock2D object based on the given up_block_type.
Args:
up_block_type (str): The type of up block to create. Must be either "UpBlock2D" or "CrossAttnUpBlock2D".
num_layers (int): The number of layers in the ResNet block.
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
prev_output_channel (int): The number of channels in the previous output.
temb_channels (int): The number of channels in the token embedding.
add_upsample (bool): Whether to add an upsample layer after the ResNet block. Defaults to True.
resnet_eps (float): The epsilon value for the ResNet block. Defaults to 1e-6.
resnet_act_fn (str): The activation function to use in the ResNet block. Defaults to "swish".
resnet_groups (int): The number of groups in the ResNet block. Defaults to 32.
resnet_pre_norm (bool): Whether to use pre-normalization in the ResNet block. Defaults to True.
output_scale_factor (float): The scale factor to apply to the output. Defaults to 1.0.
Returns:
nn.Module: The created UpBlock2D or CrossAttnUpBlock2D object.
"""
# If attn head dim is not defined, we default it to the number of heads
if attention_head_dim is None:
logger.warning("It is recommended to provide `attention_head_dim` when calling `get_down_block`.")
logger.warning(f"Defaulting `attention_head_dim` to {num_attention_heads}.")
attention_head_dim = num_attention_heads
down_block_type = (
down_block_type[7:]
if down_block_type.startswith("UNetRes")
else down_block_type
)
if down_block_type == "DownBlock2D":
return DownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
dropout=dropout,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
)
if down_block_type == "CrossAttnDownBlock2D":
if cross_attention_dim is None:
raise ValueError(
"cross_attention_dim must be specified for CrossAttnDownBlock2D"
)
return CrossAttnDownBlock2D(
num_layers=num_layers,
transformer_layers_per_block=transformer_layers_per_block,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
dropout=dropout,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
)
raise ValueError(f"{down_block_type} does not exist.")
|
This function creates and returns a DownBlock2D or CrossAttnDownBlock2D object based on the given down_block_type.
Args:
    down_block_type (str): The type of down block to create. Must be either "DownBlock2D" or "CrossAttnDownBlock2D".
    num_layers (int): The number of ResNet layers in the block.
    in_channels (int): The number of input channels.
    out_channels (int): The number of output channels.
    temb_channels (int): The number of channels in the time embedding.
    add_downsample (bool): Whether to add a downsample layer after the ResNet layers.
    resnet_eps (float): The epsilon value for the ResNet layers.
    resnet_act_fn (str): The activation function to use in the ResNet layers.
    resnet_groups (Optional[int]): The number of groups in the ResNet layers.
    cross_attention_dim (Optional[int]): The cross-attention dimension. Required for CrossAttnDownBlock2D.
    attention_head_dim (Optional[int]): The attention head dimension. Defaults to num_attention_heads when not provided.
    dropout (float): The dropout probability. Defaults to 0.0.
Returns:
    nn.Module: The created DownBlock2D or CrossAttnDownBlock2D object.
get_down_block
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
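The dispatch logic (strip an optional "UNetRes" prefix, then branch on the block name) can be sketched with toy classes. `ToyDownBlock` and `ToyCrossAttnDownBlock` below are illustrative placeholders, not the real diffusers blocks.

class ToyDownBlock:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class ToyCrossAttnDownBlock:
    def __init__(self, **kwargs):
        if kwargs.get("cross_attention_dim") is None:
            raise ValueError("cross_attention_dim must be specified")
        self.kwargs = kwargs

def get_down_block_sketch(down_block_type, **kwargs):
    # Mirror the prefix stripping and name dispatch of the factory above.
    if down_block_type.startswith("UNetRes"):
        down_block_type = down_block_type[len("UNetRes"):]
    if down_block_type == "DownBlock2D":
        return ToyDownBlock(**kwargs)
    if down_block_type == "CrossAttnDownBlock2D":
        return ToyCrossAttnDownBlock(**kwargs)
    raise ValueError(f"{down_block_type} does not exist.")

block = get_down_block_sketch("UNetResDownBlock2D", num_layers=2, in_channels=320)
print(type(block).__name__)                                 # ToyDownBlock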
|
def forward(
self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
"""
Forward pass of the UNetMidBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the UNetMidBlock2D.
            temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2D.
"""
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
if attn is not None:
hidden_states = attn(hidden_states, temb=temb)
hidden_states = resnet(hidden_states, temb)
return hidden_states
|
Forward pass of the UNetMidBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the UNetMidBlock2D.
    temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2D.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Forward pass for the UNetMidBlock2DCrossAttn class.
Args:
hidden_states (torch.FloatTensor): The input hidden states tensor.
temb (Optional[torch.FloatTensor], optional): The optional tensor for time embeddings.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The optional encoder hidden states tensor.
attention_mask (Optional[torch.FloatTensor], optional): The optional attention mask tensor.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The optional cross-attention kwargs tensor.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The optional encoder attention mask tensor.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2DCrossAttn layers.
"""
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
else:
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
return hidden_states
|
Forward pass for the UNetMidBlock2DCrossAttn class.
Args:
hidden_states (torch.FloatTensor): The input hidden states tensor.
temb (Optional[torch.FloatTensor], optional): The optional tensor for time embeddings.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The optional encoder hidden states tensor.
attention_mask (Optional[torch.FloatTensor], optional): The optional attention mask tensor.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The optional cross-attention kwargs tensor.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The optional encoder attention mask tensor.
Returns:
torch.FloatTensor: The output tensor after passing through the UNetMidBlock2DCrossAttn layers.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
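The `create_custom_forward` wrapper plus `torch.utils.checkpoint.checkpoint` pattern used here, and again in the down/up blocks below, trades compute for memory by recomputing activations during backward. A generic, self-contained sketch (the small `nn.Sequential` stands in for a resnet):

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

resnet = nn.Sequential(nn.Linear(32, 32), nn.GELU(), nn.Linear(32, 32))

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

x = torch.randn(4, 32, requires_grad=True)
# Activations inside `resnet` are not stored on the forward pass and are
# recomputed when backward reaches this segment.
y = checkpoint(create_custom_forward(resnet), x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)                                         # torch.Size([4, 32])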
|
def forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
additional_residuals: Optional[torch.FloatTensor] = None,
) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
"""
Forward pass for the CrossAttnDownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
            temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The cross-attention kwargs. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask. Defaults to None.
additional_residuals (Optional[torch.FloatTensor], optional): The additional residuals. Defaults to None.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output hidden states and residuals.
"""
output_states = ()
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
blocks = list(zip(self.resnets, self.attentions))
for i, (resnet, attn) in enumerate(blocks):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
else:
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
# apply additional residuals to the output of the last pair of resnet and attention blocks
if i == len(blocks) - 1 and additional_residuals is not None:
hidden_states = hidden_states + additional_residuals
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, scale=lora_scale)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
|
Forward pass for the CrossAttnDownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input hidden states.
    temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): The cross-attention kwargs. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask. Defaults to None.
additional_residuals (Optional[torch.FloatTensor], optional): The additional residuals. Defaults to None.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output hidden states and residuals.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
scale: float = 1.0,
) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
"""
Forward pass of the DownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the DownBlock2D layer.
            temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
scale (float, optional): The scale factor for the input tensor. Defaults to 1.0.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output tensor and any additional hidden states.
"""
output_states = ()
for resnet in self.resnets:
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
use_reentrant=False,
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb, scale=scale)
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, scale=scale)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
|
Forward pass of the DownBlock2D class.
Args:
hidden_states (torch.FloatTensor): The input tensor to the DownBlock2D layer.
    temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
scale (float, optional): The scale factor for the input tensor. Defaults to 1.0.
Returns:
Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: The output tensor and any additional hidden states.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Forward pass for the CrossAttnUpBlock2D class.
Args:
self (CrossAttnUpBlock2D): An instance of the CrossAttnUpBlock2D class.
hidden_states (torch.FloatTensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states tensors.
            temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states tensor. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for cross attention. Defaults to None.
upsample_size (Optional[int], optional): The upsample size. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask tensor. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
"""
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
else:
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
hidden_states, _ref_feature = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(
hidden_states, upsample_size, scale=lora_scale
)
return hidden_states
|
Forward pass for the CrossAttnUpBlock2D class.
Args:
self (CrossAttnUpBlock2D): An instance of the CrossAttnUpBlock2D class.
hidden_states (torch.FloatTensor): The input hidden states tensor.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states tensors.
    temb (Optional[torch.FloatTensor], optional): The time embedding tensor. Defaults to None.
encoder_hidden_states (Optional[torch.FloatTensor], optional): The encoder hidden states tensor. Defaults to None.
cross_attention_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for cross attention. Defaults to None.
upsample_size (Optional[int], optional): The upsample size. Defaults to None.
attention_mask (Optional[torch.FloatTensor], optional): The attention mask tensor. Defaults to None.
encoder_attention_mask (Optional[torch.FloatTensor], optional): The encoder attention mask tensor. Defaults to None.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
|
def forward(
self,
hidden_states: torch.FloatTensor,
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
temb: Optional[torch.FloatTensor] = None,
upsample_size: Optional[int] = None,
scale: float = 1.0,
) -> torch.FloatTensor:
"""
Forward pass for the UpBlock2D class.
Args:
self (UpBlock2D): An instance of the UpBlock2D class.
hidden_states (torch.FloatTensor): The input tensor to the block.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states.
            temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
upsample_size (Optional[int], optional): The size to upsample the input tensor to. Defaults to None.
scale (float, optional): The scale factor to apply to the input tensor. Defaults to 1.0.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
"""
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
use_reentrant=False,
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb, scale=scale)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size, scale=scale)
return hidden_states
|
Forward pass for the UpBlock2D class.
Args:
self (UpBlock2D): An instance of the UpBlock2D class.
hidden_states (torch.FloatTensor): The input tensor to the block.
res_hidden_states_tuple (Tuple[torch.FloatTensor, ...]): A tuple of residual hidden states.
    temb (Optional[torch.FloatTensor], optional): The time embeddings. Defaults to None.
upsample_size (Optional[int], optional): The size to upsample the input tensor to. Defaults to None.
scale (float, optional): The scale factor to apply to the input tensor. Defaults to 1.0.
Returns:
torch.FloatTensor: The output tensor after passing through the block.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_blocks.py
|
MIT
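Both up-block variants consume the residual tuple from the down path the same way: pop the most recent skip tensor and concatenate it on the channel axis before the resnet. A toy-shaped sketch of that step:

import torch

hidden_states = torch.randn(2, 640, 32, 32)
res_hidden_states_tuple = (
    torch.randn(2, 320, 32, 32),              # earlier skip, consumed later
    torch.randn(2, 640, 32, 32),              # most recent skip, consumed first
)
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
print(hidden_states.shape)                    # torch.Size([2, 1280, 32, 32])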
|
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(
name: str,
module: torch.nn.Module,
processors: Dict[str, AttentionProcessor],
):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor(
return_deprecated_lora=True
)
for sub_name, child in module.named_children():
fn_recursive_add_processors(
f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
|
Returns:
    `dict` of attention processors: A dictionary containing all attention processors used in the model,
    indexed by their weight names.
|
attn_processors
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_condition.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_condition.py
|
MIT
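The recursive collection walks `named_children()` and records every module exposing a processor getter. The sketch below reproduces the traversal with a hypothetical `get_processor()` on toy modules; the real getter additionally takes `return_deprecated_lora`.

import torch.nn as nn

class ToyAttention(nn.Module):
    # Stand-in for an attention layer that exposes a processor getter.
    def __init__(self):
        super().__init__()
        self.processor = "AttnProcessor"
    def get_processor(self):
        return self.processor

model = nn.ModuleDict({
    "down": nn.Sequential(ToyAttention(), nn.Linear(4, 4)),
    "mid": ToyAttention(),
})

def collect(name, module, processors):
    if hasattr(module, "get_processor"):
        processors[f"{name}.processor"] = module.get_processor()
    for sub_name, child in module.named_children():
        collect(f"{name}.{sub_name}", child, processors)
    return processors

processors = {}
for name, module in model.named_children():
    collect(name, module, processors)
print(processors)
# {'down.0.processor': 'AttnProcessor', 'mid.processor': 'AttnProcessor'}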
|
def set_attn_processor(
self,
processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]],
_remove_lora=False,
):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor, _remove_lora=_remove_lora)
else:
module.set_processor(
processor.pop(f"{name}.processor"), _remove_lora=_remove_lora
)
for sub_name, child in module.named_children():
fn_recursive_attn_processor(
f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
|
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
|
set_attn_processor
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_condition.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_condition.py
|
MIT
|
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(
proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS
for proc in self.attn_processors.values()
):
processor = AttnAddedKVProcessor()
elif all(
proc.__class__ in CROSS_ATTENTION_PROCESSORS
for proc in self.attn_processors.values()
):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor, _remove_lora=True)
|
Disables custom attention processors and sets the default attention implementation.
|
set_default_attn_processor
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_condition.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_condition.py
|
MIT
|
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = (
num_sliceable_layers * [slice_size]
if not isinstance(slice_size, list)
else slice_size
)
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i, size in enumerate(slice_size):
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(
f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(
module: torch.nn.Module, slice_size: List[int]
):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_condition.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_condition.py
|
MIT
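The slice-size resolution ("auto", "max", a single int, or an explicit list) reduces to a few lines of plain Python; the per-layer head dims below are made up for illustration.

sliceable_head_dims = [8, 8, 16, 16]          # hypothetical per-layer head dims

def resolve(slice_size):
    if slice_size == "auto":
        # halve every layer: attention is computed in two steps
        return [dim // 2 for dim in sliceable_head_dims]
    if slice_size == "max":
        # one slice at a time: maximum memory savings
        return len(sliceable_head_dims) * [1]
    if not isinstance(slice_size, list):
        return len(sliceable_head_dims) * [slice_size]
    return slice_size

print(resolve("auto"))                        # [4, 4, 8, 8]
print(resolve("max"))                         # [1, 1, 1, 1]
print(resolve(2))                             # [2, 2, 2, 2]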
|
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
cond_tensor: torch.FloatTensor=None,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
post_process: bool = False,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
added_cond_kwargs: (`dict`, *optional*):
                A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added to UNet long skip connections from down blocks to up blocks for
example from ControlNet side model(s)
mid_block_additional_residual (`torch.Tensor`, *optional*):
additional residual to be added to UNet mid block output, for example from ControlNet side model
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
"""
        # By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (
1 - encoder_attention_mask.to(sample.dtype)
) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor(
[timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
aug_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError(
"class_labels should be provided when num_class_embeds > 0"
)
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(
class_labels).to(dtype=sample.dtype)
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_image":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `addition_embed_type` set to 'text_image'"
                    " which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get(
"text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `addition_embed_type` set to 'text_time'"
                    " which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `addition_embed_type` set to 'text_time'"
                    " which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
elif self.config.addition_embed_type == "image":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `addition_embed_type` set to 'image'"
                    " which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
aug_emb = self.add_embedding(image_embs)
elif self.config.addition_embed_type == "image_hint":
# Kandinsky 2.2 - style
if (
"image_embeds" not in added_cond_kwargs
or "hint" not in added_cond_kwargs
):
                raise ValueError(
                    f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint'"
                    " which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
hint = added_cond_kwargs.get("hint")
aug_emb, hint = self.add_embedding(image_embs, hint)
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "text_proj"
):
encoder_hidden_states = self.encoder_hid_proj(
encoder_hidden_states)
elif (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "text_image_proj"
):
            # Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj'"
                    " which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(
encoder_hidden_states, image_embeds
)
elif (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "image_proj"
):
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj'"
                    " which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(image_embeds)
elif (
self.encoder_hid_proj is not None
and self.config.encoder_hid_dim_type == "ip_image_proj"
):
if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj'"
                    " which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
image_embeds = self.encoder_hid_proj(image_embeds).to(
encoder_hidden_states.dtype
)
encoder_hidden_states = torch.cat(
[encoder_hidden_states, image_embeds], dim=1
)
# 2. pre-process
sample = self.conv_in(sample)
if cond_tensor is not None:
sample = sample + cond_tensor
# 2.5 GLIGEN position net
if (
cross_attention_kwargs is not None
and cross_attention_kwargs.get("gligen", None) is not None
):
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {
"objs": self.position_net(**gligen_args)
}
# 3. down
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = (
mid_block_additional_residual is not None
and down_block_additional_residuals is not None
)
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if (
not is_adapter
and mid_block_additional_residual is None
and down_block_additional_residuals is not None
):
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
            for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if (
hasattr(downsample_block, "has_cross_attention")
and downsample_block.has_cross_attention
):
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = (
down_intrablock_additional_residuals.pop(0)
)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(
hidden_states=sample, temb=emb, scale=lora_scale
)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = (
down_block_res_sample + down_block_additional_residual
)
new_down_block_res_samples = new_down_block_res_samples + (
down_block_res_sample,
)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if (
hasattr(self.mid_block, "has_cross_attention")
and self.mid_block.has_cross_attention
):
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets):]
down_block_res_samples = down_block_res_samples[
: -len(upsample_block.resnets)
]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if (
hasattr(upsample_block, "has_cross_attention")
and upsample_block.has_cross_attention
):
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
scale=lora_scale,
)
# 6. post-process
if post_process:
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
|
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
added_cond_kwargs: (`dict`, *optional*):
        A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added to UNet long skip connections from down blocks to up blocks for
example from ControlNet side model(s)
mid_block_additional_residual (`torch.Tensor`, *optional*):
additional residual to be added to UNet mid block output, for example from ControlNet side model
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
a `tuple` is returned where the first element is the sample tensor.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_condition.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_condition.py
|
MIT
|
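A minimal call sketch for the forward pass above. It assumes `unet` is an already constructed instance with a Stable Diffusion 1.5-style configuration (no `addition_embed_type`, no `encoder_hid_proj`); the latent and text-feature shapes below are illustrative placeholders rather than values taken from the repository.
import torch
# `unet` is assumed to be a constructed UNet2DConditionModel-compatible instance (hypothetical).
batch = 2
sample = torch.randn(batch, 4, 64, 64)               # noisy latents: (batch, channel, height, width)
encoder_hidden_states = torch.randn(batch, 77, 768)  # text-encoder features (assumed dimensions)
out = unet(sample, timestep=999, encoder_hidden_states=encoder_hidden_states)
denoised = out.sample                                 # UNet2DConditionOutput.sample, same shape as `sample`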
def load_change_cross_attention_dim(
cls,
pretrained_model_path: PathLike,
subfolder=None,
# unet_additional_kwargs=None,
):
"""
        Load a pre-trained model and change its cross-attention dimension.
        Parameters:
            pretrained_model_path (`str` or `PathLike`):
                Path to the local directory containing the pre-trained model's config and weights.
            subfolder (`str`, *optional*):
                Subfolder inside `pretrained_model_path` that holds the model files.
        Returns:
            A model built from the original config with `cross_attention_dim` set to 1024.
            Checkpoint weights whose shapes still match are loaded non-strictly; mismatched
            tensors keep their freshly initialized values.
"""
pretrained_model_path = Path(pretrained_model_path)
if subfolder is not None:
pretrained_model_path = pretrained_model_path.joinpath(subfolder)
config_file = pretrained_model_path / "config.json"
if not (config_file.exists() and config_file.is_file()):
raise RuntimeError(
f"{config_file} does not exist or is not a file")
unet_config = cls.load_config(config_file)
unet_config["cross_attention_dim"] = 1024
model = cls.from_config(unet_config)
# load the vanilla weights
if pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME).exists():
logger.debug(
f"loading safeTensors weights from {pretrained_model_path} ..."
)
state_dict = load_file(
pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME), device="cpu"
)
elif pretrained_model_path.joinpath(WEIGHTS_NAME).exists():
logger.debug(f"loading weights from {pretrained_model_path} ...")
state_dict = torch.load(
pretrained_model_path.joinpath(WEIGHTS_NAME),
map_location="cpu",
weights_only=True,
)
else:
raise FileNotFoundError(
f"no weights file found in {pretrained_model_path}")
model_state_dict = model.state_dict()
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
state_dict[k] = model_state_dict[k]
# load the weights into the model
m, u = model.load_state_dict(state_dict, strict=False)
print(m, u)
return model
|
Load a pre-trained model and change its cross-attention dimension.
Parameters:
    pretrained_model_path (`str` or `PathLike`):
        Path to the local directory containing the pre-trained model's config and weights.
    subfolder (`str`, *optional*):
        Subfolder inside `pretrained_model_path` that holds the model files.
Returns:
    A model built from the original config with `cross_attention_dim` set to 1024.
    Checkpoint weights whose shapes still match are loaded non-strictly; mismatched
    tensors keep their freshly initialized values.
|
load_change_cross_attention_dim
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_2d_condition.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_2d_condition.py
|
MIT
|
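A hedged call sketch for the classmethod above. The import path and class name are assumptions inferred from this file's path, and the checkpoint directory is a placeholder for a diffusers-style layout with a `unet/` subfolder.
from joyhallo.models.unet_2d_condition import UNet2DConditionModel  # assumed import path / class name

model = UNet2DConditionModel.load_change_cross_attention_dim(
    "path/to/stable-diffusion",  # placeholder checkpoint directory
    subfolder="unet",
)
# cross_attention_dim is forced to 1024 in the config; checkpoint tensors whose shapes no longer
# match are swapped for the freshly initialized weights before the non-strict load_state_dict call.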
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, the maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = (
num_slicable_layers * [slice_size]
if not isinstance(slice_size, list)
else slice_size
)
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i, size in enumerate(slice_size):
dim = sliceable_head_dims[i]
if size is not None and size > dim:
                raise ValueError(
                    f"size {size} has to be smaller than or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(
module: torch.nn.Module, slice_size: List[int]
):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
|
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
        When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
        `"max"`, the maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
|
set_attention_slice
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_3d.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_3d.py
|
MIT
|
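Illustrative calls for the method above, assuming `unet` is any model exposing it. An integer or list entry must not exceed the sliceable head dimension of any layer, so the concrete values here are only examples.
unet.set_attention_slice("auto")  # every sliceable layer computes attention in two slices (dim // 2)
unet.set_attention_slice("max")   # slice size 1 everywhere: lowest memory use, slowest
unet.set_attention_slice(2)       # a single integer is broadcast to every sliceable layer
# A list must supply exactly one size per sliceable layer, e.g. [2, 4, 4] for a model with three.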
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
audio_embedding: Optional[torch.Tensor] = None,
class_labels: Optional[torch.Tensor] = None,
mask_cond_fea: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
full_mask: Optional[torch.Tensor] = None,
face_mask: Optional[torch.Tensor] = None,
lip_mask: Optional[torch.Tensor] = None,
motion_scale: Optional[torch.Tensor] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
return_dict: bool = True,
# start: bool = False,
) -> Union[UNet3DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states, face_emb
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
mask_cond_fea (`torch.FloatTensor`, *optional*): mask_feature tensor
audio_embedding (`torch.FloatTensor`, *optional*): audio embedding tensor, audio_emb
full_mask (`torch.FloatTensor`, *optional*): full mask tensor, full_mask
face_mask (`torch.FloatTensor`, *optional*): face mask tensor, face_mask
lip_mask (`torch.FloatTensor`, *optional*): lip mask tensor, lip_mask
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info(
"Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# time
timesteps = timestep
if not torch.is_tensor(timesteps):
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor(
[timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError(
"class_labels should be provided when num_class_embeds > 0"
)
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
# pre-process
sample = self.conv_in(sample)
if mask_cond_fea is not None:
sample = sample + mask_cond_fea
# down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if (
hasattr(downsample_block, "has_cross_attention")
and downsample_block.has_cross_attention
):
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
# print("")
else:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
# audio_embedding=audio_embedding,
)
# print("")
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = (
down_block_res_sample + down_block_additional_residual
)
new_down_block_res_samples += (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# mid
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
if mid_block_additional_residual is not None:
sample = sample + mid_block_additional_residual
# up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets):]
down_block_res_samples = down_block_res_samples[
: -len(upsample_block.resnets)
]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if (
hasattr(upsample_block, "has_cross_attention")
and upsample_block.has_cross_attention
):
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
upsample_size=upsample_size,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
encoder_hidden_states=encoder_hidden_states,
# audio_embedding=audio_embedding,
)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet3DConditionOutput(sample=sample)
|
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states, face_emb
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
mask_cond_fea (`torch.FloatTensor`, *optional*): mask_feature tensor
audio_embedding (`torch.FloatTensor`, *optional*): audio embedding tensor, audio_emb
full_mask (`torch.FloatTensor`, *optional*): full mask tensor, full_mask
face_mask (`torch.FloatTensor`, *optional*): face mask tensor, face_mask
lip_mask (`torch.FloatTensor`, *optional*): lip mask tensor, lip_mask
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_3d.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_3d.py
|
MIT
|
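The forward pass above converts keep/discard attention masks into additive biases before any block runs; the standalone snippet below reproduces just that conversion with arbitrary example values.
import torch

attention_mask = torch.tensor([[1, 1, 0, 0]])             # (batch, key_tokens); 1 = keep, 0 = discard
bias = (1 - attention_mask.to(torch.float32)) * -10000.0  # keep -> 0.0, discard -> -10000.0
bias = bias.unsqueeze(1)                                   # (batch, 1, key_tokens), broadcastable over query tokens
print(bias)                                                # tensor([[[-0., -0., -10000., -10000.]]])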
def from_pretrained_2d(
cls,
pretrained_model_path: PathLike,
motion_module_path: PathLike,
subfolder=None,
unet_additional_kwargs=None,
mm_zero_proj_out=False,
use_landmark=True,
):
"""
        Load a pre-trained 2D UNet checkpoint and inflate it into a 3D UNet with motion modules.
        Parameters:
            pretrained_model_path (`str` or `PathLike`):
                Path to the directory containing the pre-trained 2D UNet model.
            motion_module_path (`str` or `PathLike`):
                Path to the motion-module weights file (`.pth`/`.pt`/`.ckpt` or `.safetensors`).
            subfolder (`str`, *optional*):
                Subfolder inside `pretrained_model_path` that holds the UNet files.
            unet_additional_kwargs (`dict`, *optional*):
                Extra keyword arguments unpacked into `from_config`.
            mm_zero_proj_out (`bool`, *optional*, defaults to `False`):
                If True, skip loading the motion module's `proj_out` weights so those layers keep their
                freshly initialized values.
            use_landmark (`bool`, *optional*, defaults to `True`):
                If True, set `in_channels` and `out_channels` to 8 in the loaded config.
        Returns:
            `UNet3DConditionModel`:
                The loaded 3D UNet model.
"""
pretrained_model_path = Path(pretrained_model_path)
motion_module_path = Path(motion_module_path)
if subfolder is not None:
pretrained_model_path = pretrained_model_path.joinpath(subfolder)
        logger.info(
            f"loading temporal unet's pretrained weights from {pretrained_model_path} ..."
)
config_file = pretrained_model_path / "config.json"
if not (config_file.exists() and config_file.is_file()):
raise RuntimeError(
f"{config_file} does not exist or is not a file")
unet_config = cls.load_config(config_file)
unet_config["_class_name"] = cls.__name__
unet_config["down_block_types"] = [
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
]
unet_config["up_block_types"] = [
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
]
unet_config["mid_block_type"] = "UNetMidBlock3DCrossAttn"
if use_landmark:
unet_config["in_channels"] = 8
unet_config["out_channels"] = 8
model = cls.from_config(unet_config, **unet_additional_kwargs)
# load the vanilla weights
if pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME).exists():
logger.debug(
f"loading safeTensors weights from {pretrained_model_path} ..."
)
state_dict = load_file(
pretrained_model_path.joinpath(SAFETENSORS_WEIGHTS_NAME), device="cpu"
)
elif pretrained_model_path.joinpath(WEIGHTS_NAME).exists():
logger.debug(f"loading weights from {pretrained_model_path} ...")
state_dict = torch.load(
pretrained_model_path.joinpath(WEIGHTS_NAME),
map_location="cpu",
weights_only=True,
)
else:
raise FileNotFoundError(
f"no weights file found in {pretrained_model_path}")
# load the motion module weights
if motion_module_path.exists() and motion_module_path.is_file():
if motion_module_path.suffix.lower() in [".pth", ".pt", ".ckpt"]:
print(
f"Load motion module params from {motion_module_path}")
motion_state_dict = torch.load(
motion_module_path, map_location="cpu", weights_only=True
)
elif motion_module_path.suffix.lower() == ".safetensors":
motion_state_dict = load_file(motion_module_path, device="cpu")
else:
raise RuntimeError(
f"unknown file format for motion module weights: {motion_module_path.suffix}"
)
if mm_zero_proj_out:
logger.info(
"Zero initialize proj_out layers in motion module...")
new_motion_state_dict = OrderedDict()
for k in motion_state_dict:
if "proj_out" in k:
continue
new_motion_state_dict[k] = motion_state_dict[k]
motion_state_dict = new_motion_state_dict
# merge the state dicts
state_dict.update(motion_state_dict)
model_state_dict = model.state_dict()
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
state_dict[k] = model_state_dict[k]
# load the weights into the model
m, u = model.load_state_dict(state_dict, strict=False)
logger.debug(
f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
params = [
p.numel() if "temporal" in n else 0 for n, p in model.named_parameters()
]
logger.info(f"Loaded {sum(params) / 1e6}M-parameter motion module")
return model
|
Load a pre-trained 2D UNet checkpoint and inflate it into a 3D UNet with motion modules.
Parameters:
    pretrained_model_path (`str` or `PathLike`):
        Path to the directory containing the pre-trained 2D UNet model.
    motion_module_path (`str` or `PathLike`):
        Path to the motion-module weights file (`.pth`/`.pt`/`.ckpt` or `.safetensors`).
    subfolder (`str`, *optional*):
        Subfolder inside `pretrained_model_path` that holds the UNet files.
    unet_additional_kwargs (`dict`, *optional*):
        Extra keyword arguments unpacked into `from_config`.
    mm_zero_proj_out (`bool`, *optional*, defaults to `False`):
        If True, skip loading the motion module's `proj_out` weights so those layers keep their
        freshly initialized values.
    use_landmark (`bool`, *optional*, defaults to `True`):
        If True, set `in_channels` and `out_channels` to 8 in the loaded config.
Returns:
    `UNet3DConditionModel`:
        The loaded 3D UNet model.
|
from_pretrained_2d
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_3d.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_3d.py
|
MIT
|
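A hedged call sketch for the loader above. The paths are placeholders, the import path is assumed from this file's location, and the class name is taken from the Returns section. Note that `unet_additional_kwargs` is unpacked with `**`, so a dict (possibly empty) must be supplied.
from joyhallo.models.unet_3d import UNet3DConditionModel  # assumed import path

unet3d = UNet3DConditionModel.from_pretrained_2d(
    "path/to/stable-diffusion",   # placeholder: directory with a diffusers-style 2D UNet
    "path/to/motion_module.pth",  # placeholder: motion-module weights (.pth/.pt/.ckpt or .safetensors)
    subfolder="unet",
    unet_additional_kwargs={},    # must be a dict; the default None would fail at `**unet_additional_kwargs`
)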
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
audio_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
use_audio_module=None,
depth=0,
stack_enable_blocks_name=None,
stack_enable_blocks_depth=None,
):
"""
Factory function to instantiate a down-block module for the 3D UNet architecture.
Down blocks are used in the downsampling part of the U-Net to reduce the spatial dimensions
of the feature maps while increasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- down_block_type (str): The type of down block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
    - temb_channels (int): The number of time-embedding channels.
- add_downsample (bool): Flag to add a downsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of a down-sampling block module.
"""
down_block_type = (
down_block_type[7:]
if down_block_type.startswith("UNetRes")
else down_block_type
)
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
if down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError(
"cross_attention_dim must be specified for CrossAttnDownBlock3D"
)
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
audio_attention_dim=audio_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
use_audio_module=use_audio_module,
depth=depth,
stack_enable_blocks_name=stack_enable_blocks_name,
stack_enable_blocks_depth=stack_enable_blocks_depth,
)
raise ValueError(f"{down_block_type} does not exist.")
|
Factory function to instantiate a down-block module for the 3D UNet architecture.
Down blocks are used in the downsampling part of the U-Net to reduce the spatial dimensions
of the feature maps while increasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- down_block_type (str): The type of down block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- temb_channels (int): The number of time-embedding channels.
- add_downsample (bool): Flag to add a downsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of a down-sampling block module.
|
get_down_block
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_3d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_3d_blocks.py
|
MIT
|
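The factory validates its arguments before building anything, which the sketch below exercises; the channel and layer sizes are arbitrary and no block is actually constructed here.
try:
    get_down_block(
        "CrossAttnDownBlock3D",
        num_layers=2, in_channels=320, out_channels=320, temb_channels=1280,
        add_downsample=True, resnet_eps=1e-5, resnet_act_fn="silu",
        attn_num_head_channels=8,  # cross_attention_dim deliberately omitted
    )
except ValueError as err:
    print(err)  # cross_attention_dim must be specified for CrossAttnDownBlock3D
try:
    get_down_block(
        "FancyDownBlock3D",        # hypothetical, unsupported block type
        num_layers=2, in_channels=320, out_channels=320, temb_channels=1280,
        add_downsample=True, resnet_eps=1e-5, resnet_act_fn="silu",
        attn_num_head_channels=8,
    )
except ValueError as err:
    print(err)  # FancyDownBlock3D does not exist.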
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
audio_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
use_audio_module=None,
depth=0,
stack_enable_blocks_name=None,
stack_enable_blocks_depth=None,
):
"""
Factory function to instantiate an up-block module for the 3D UNet architecture.
Up blocks are used in the upsampling part of the U-Net to increase the spatial dimensions
of the feature maps while decreasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- up_block_type (str): The type of up block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- prev_output_channel (int): The number of channels from the previous layer's output.
    - temb_channels (int): The number of time-embedding channels.
- add_upsample (bool): Flag to add an upsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of an up-sampling block module.
"""
up_block_type = (
up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
)
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
if up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError(
"cross_attention_dim must be specified for CrossAttnUpBlock3D"
)
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
audio_attention_dim=audio_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
use_audio_module=use_audio_module,
depth=depth,
stack_enable_blocks_name=stack_enable_blocks_name,
stack_enable_blocks_depth=stack_enable_blocks_depth,
)
raise ValueError(f"{up_block_type} does not exist.")
|
Factory function to instantiate an up-block module for the 3D UNet architecture.
Up blocks are used in the upsampling part of the U-Net to increase the spatial dimensions
of the feature maps while decreasing the depth. This function can create blocks with or without
cross attention based on the specified parameters.
Parameters:
- up_block_type (str): The type of up block to instantiate.
- num_layers (int): The number of layers in the block.
- in_channels (int): The number of input channels.
- out_channels (int): The number of output channels.
- prev_output_channel (int): The number of channels from the previous layer's output.
- temb_channels (int): The number of time-embedding channels.
- add_upsample (bool): Flag to add an upsampling layer.
- resnet_eps (float): Epsilon for residual block stability.
- resnet_act_fn (callable): Activation function for the residual block.
- ... (remaining parameters): Additional parameters for configuring the block.
Returns:
- nn.Module: An instance of an up-sampling block module.
|
get_up_block
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_3d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_3d_blocks.py
|
MIT
|
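A small sketch of the dispatch rules above: a leading "UNetRes" prefix is stripped, so the name below resolves to CrossAttnUpBlock3D, and omitting `cross_attention_dim` makes the factory raise before any layers are built (the sizes are arbitrary).
try:
    get_up_block(
        "UNetResCrossAttnUpBlock3D",  # prefix is stripped, resolves to "CrossAttnUpBlock3D"
        num_layers=3, in_channels=640, out_channels=320, prev_output_channel=640,
        temb_channels=1280, add_upsample=True, resnet_eps=1e-5, resnet_act_fn="silu",
        attn_num_head_channels=8,     # cross_attention_dim deliberately omitted
    )
except ValueError as err:
    print(err)  # cross_attention_dim must be specified for CrossAttnUpBlock3D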
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
full_mask=None,
face_mask=None,
lip_mask=None,
audio_embedding=None,
motion_scale=None,
):
"""
Forward pass for the UNetMidBlock3DCrossAttn class.
Args:
self (UNetMidBlock3DCrossAttn): An instance of the UNetMidBlock3DCrossAttn class.
hidden_states (Tensor): The input hidden states tensor.
temb (Tensor, optional): The input temporal embedding tensor. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.
full_mask (Tensor, optional): The full mask tensor. Defaults to None.
face_mask (Tensor, optional): The face mask tensor. Defaults to None.
lip_mask (Tensor, optional): The lip mask tensor. Defaults to None.
audio_embedding (Tensor, optional): The audio embedding tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the UNetMidBlock3DCrossAttn layers.
"""
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet, audio_module, motion_module in zip(
self.attentions, self.resnets[1:], self.audio_modules, self.motion_modules
):
hidden_states, motion_frame = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
return_dict=False,
) # .sample
if len(motion_frame[0]) > 0:
# if motion_frame[0][0].numel() > 0:
motion_frames = motion_frame[0][0]
motion_frames = rearrange(
motion_frames,
"b f (d1 d2) c -> b c f d1 d2",
d1=hidden_states.size(-1),
)
else:
motion_frames = torch.zeros(
hidden_states.shape[0],
hidden_states.shape[1],
4,
hidden_states.shape[3],
hidden_states.shape[4],
)
n_motion_frames = motion_frames.size(2)
if audio_module is not None:
hidden_states = (
audio_module(
hidden_states,
encoder_hidden_states=audio_embedding,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
motion_scale=motion_scale,
return_dict=False,
)
)[0] # .sample
if motion_module is not None:
motion_frames = motion_frames.to(
device=hidden_states.device, dtype=hidden_states.dtype
)
_hidden_states = (
torch.cat([motion_frames, hidden_states], dim=2)
if n_motion_frames > 0
else hidden_states
)
hidden_states = motion_module(
_hidden_states, encoder_hidden_states=encoder_hidden_states
)
hidden_states = hidden_states[:, :, n_motion_frames:]
hidden_states = resnet(hidden_states, temb)
return hidden_states
|
Forward pass for the UNetMidBlock3DCrossAttn class.
Args:
self (UNetMidBlock3DCrossAttn): An instance of the UNetMidBlock3DCrossAttn class.
hidden_states (Tensor): The input hidden states tensor.
temb (Tensor, optional): The input temporal embedding tensor. Defaults to None.
encoder_hidden_states (Tensor, optional): The encoder hidden states tensor. Defaults to None.
attention_mask (Tensor, optional): The attention mask tensor. Defaults to None.
full_mask (Tensor, optional): The full mask tensor. Defaults to None.
face_mask (Tensor, optional): The face mask tensor. Defaults to None.
lip_mask (Tensor, optional): The lip mask tensor. Defaults to None.
audio_embedding (Tensor, optional): The audio embedding tensor. Defaults to None.
Returns:
Tensor: The output tensor after passing through the UNetMidBlock3DCrossAttn layers.
|
forward
|
python
|
jdh-algo/JoyHallo
|
joyhallo/models/unet_3d_blocks.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/joyhallo/models/unet_3d_blocks.py
|
MIT
|
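The mid-block forward above reshapes cached motion frames from flattened spatial tokens back to a video layout with einops; the standalone snippet below shows that reshape with hypothetical sizes (in the block itself, `d1` comes from `hidden_states.size(-1)`).
import torch
from einops import rearrange

b, f, d1, d2, c = 1, 4, 8, 8, 320              # hypothetical sizes
motion_frames = torch.randn(b, f, d1 * d2, c)  # flattened spatial tokens per motion frame
motion_frames = rearrange(motion_frames, "b f (d1 d2) c -> b c f d1 d2", d1=d1)
print(motion_frames.shape)                     # torch.Size([1, 320, 4, 8, 8])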