| code (string, 66 to 870k chars) | docstring (string, 19 to 26.7k chars) | func_name (string, 1 to 138 chars) | language (1 class) | repo (string, 7 to 68 chars) | path (string, 5 to 324 chars) | url (string, 46 to 389 chars) | license (7 classes) |
|---|---|---|---|---|---|---|---|
def rpc_events_subscribe(handler, event_id, event_types=None, attributes=None):
    """
    Subscribe the client to the specified event published by the server.
    When the event is published, the specified *attributes* of it and its
    corresponding id and type information will be sent to the client.

    :param str event_id: The identifier of the event to subscribe to.
    :param list event_types: A list of sub-types for the corresponding event.
    :param list attributes: A list of attributes of the event object to be sent to the client.
    """
    if not isinstance(event_id, str):
        raise errors.KingPhisherAPIError('a valid event id must be specified')
    event_socket = handler.rpc_session.event_socket
    if event_socket is None:
        raise errors.KingPhisherAPIError('the event socket is not open for this session')
    if not event_id.startswith('db-'):
        # db-<table name> events are the only ones that are valid right now
        raise errors.KingPhisherAPIError('invalid event_id: ' + event_id)
    table_name = event_id[3:]
    table_name = table_name.replace('-', '_')
    metatable = database_tables.get(table_name)
    if metatable is None:
        raise errors.KingPhisherAPIError("invalid table object: {0}".format(table_name))
    # treat the None defaults as empty iterables to avoid a TypeError
    for event_type in (event_types or ()):
        if event_type not in ('deleted', 'inserted', 'updated'):
            raise errors.KingPhisherAPIError("event type {0} is invalid for db-* events".format(event_type))
    for column in (attributes or ()):
        if column not in metatable.column_names:
            raise errors.KingPhisherAPIError("column {0} is invalid for table {1}".format(column, table_name))
    return event_socket.subscribe(event_id, event_types=event_types, attributes=attributes)
|
Subscribe the client to the specified event published by the server.
When the event is published, the specified *attributes* of it and its
corresponding id and type information will be sent to the client.
:param str event_id: The identifier of the event to subscribe to.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
|
rpc_events_subscribe
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
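A minimal client-side sketch of calling this endpoint. The `rpc` proxy below is hypothetical; it stands in for whatever RPC client transport the deployment uses and is not defined in this module.
# subscribe to row-insertion events on the messages table; the event types
# must be drawn from deleted/inserted/updated and the attributes must be
# column names of the corresponding table ('id' and 'campaign_id' here)
rpc('events/subscribe', 'db-messages', event_types=['inserted'], attributes=['id', 'campaign_id'])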
def rpc_events_unsubscribe(handler, event_id, event_types=None, attributes=None):
    """
    Unsubscribe from an event published by the server that the client
    previously subscribed to.

    :param str event_id: The identifier of the event to unsubscribe from.
    :param list event_types: A list of sub-types for the corresponding event.
    :param list attributes: A list of attributes of the event object to be sent to the client.
    """
    if not isinstance(event_id, str):
        raise errors.KingPhisherAPIError('a valid event id must be specified')
    event_socket = handler.rpc_session.event_socket
    if event_socket is None:
        raise errors.KingPhisherAPIError('the event socket is not open for this session')
    return event_socket.unsubscribe(event_id, event_types=event_types, attributes=attributes)
|
Unsubscribe from an event published by the server that the client
previously subscribed to.
:param str event_id: The identifier of the event to unsubscribe from.
:param list event_types: A list of sub-types for the corresponding event.
:param list attributes: A list of attributes of the event object to be sent to the client.
|
rpc_events_unsubscribe
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_geoip_lookup(handler, ip, lang=None):
    """
    Look up an IP address in the server's GeoIP database. If the IP address
    can not be found in the database, None will be returned.

    :param str ip: The IP address to look up.
    :param str lang: The language to prefer for regional names.
    :return: The geographic information for the specified IP address.
    :rtype: dict
    """
    try:
        result = geoip.lookup(ip, lang=lang)
    except geoip.AddressNotFoundError:
        result = None
    return result
|
Look up an IP address in the server's GeoIP database. If the IP address
can not be found in the database, None will be returned.
:param str ip: The IP address to look up.
:param str lang: The language to prefer for regional names.
:return: The geographic information for the specified IP address.
:rtype: dict
|
rpc_geoip_lookup
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_geoip_lookup_multi(handler, ips, lang=None):
    """
    Look up multiple IP addresses in the server's GeoIP database. Each IP
    address that can not be found in the database will have its result set
    to None.

    :param list ips: The list of IP addresses to look up.
    :param str lang: The language to prefer for regional names.
    :return: A dictionary containing the results keyed by the specified IP
        addresses.
    :rtype: dict
    """
    results = {}
    for ip in ips:
        try:
            result = geoip.lookup(ip, lang=lang)
        except geoip.AddressNotFoundError:
            result = None
        results[ip] = result
    return results
|
Look up multiple IP addresses in the server's GeoIP database. Each IP
address that can not be found in the database will have its result set
to None.
:param list ips: The list of IP addresses to look up.
:param str lang: The language to prefer for regional names.
:return: A dictionary containing the results keyed by the specified IP
addresses.
:rtype: dict
|
rpc_geoip_lookup_multi
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
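A sketch of the return shape, assuming a `handler` object from the RPC layer; the addresses and values are illustrative.
results = rpc_geoip_lookup_multi(handler, ['8.8.8.8', '203.0.113.1'])
# each address maps to its GeoIP dict, or to None when it is not in the
# database, e.g. {'8.8.8.8': {...}, '203.0.113.1': None}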
def rpc_hostnames_add(handler, hostname):
    """
    Add a hostname to the list of values that are configured for use with this
    server. At this time, these changes (like other config changes) are not
    persisted in the server so they will be lost when the server reboots.

    .. versionadded:: 1.13.0

    :param str hostname: The hostname to add.
    """
    hostnames = handler.config.get_if_exists('server.hostnames', [])
    if hostname not in hostnames:
        hostnames.append(hostname)
        handler.config.set('server.hostnames', hostnames)
    # don't return a value indicating whether it was added or not because it could have been a vhost directory
|
Add a hostname to the list of values that are configured for use with this
server. At this time, these changes (like other config changes) are not
persisted in the server so they will be lost when the server reboots.
.. versionadded:: 1.13.0
:param str hostname: The hostname to add.
|
rpc_hostnames_add
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_plugins_list(handler):
    """
    Return information regarding enabled plugins in the server.

    :return: A dictionary representing enabled plugins and their meta-data.
    :rtype: dict
    """
    plugin_manager = handler.server.plugin_manager
    plugins = {}
    for _, plugin in plugin_manager:
        plugins[plugin.name] = {
            'authors': plugin.authors,
            'classifiers': plugin.classifiers,
            'description': plugin.description,
            'homepage': plugin.homepage,
            'name': plugin.name,
            'reference_urls': plugin.reference_urls,
            'title': plugin.title,
            'version': plugin.version
        }
    return plugins
|
Return information regarding enabled plugins in the server.
:return: A dictionary representing enabled plugins and their meta-data.
:rtype: dict
|
rpc_plugins_list
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_graphql(handler, session, query, query_vars=None):
    """
    Execute a GraphQL query and return the results. If the query fails to
    execute the errors returned are populated in the **errors** key of the
    results dictionary. If the query executes successfully the returned data
    is available in the **data** key of the results dictionary.

    :param str query: The GraphQL query to execute.
    :param dict query_vars: Any variables needed by the *query*.
    :return: The results of the query as a dictionary.
    :rtype: dict
    """
    query_vars = query_vars or {}
    result = graphql_schema.execute(
        query,
        context_value={
            'plugin_manager': handler.server.plugin_manager,
            'rpc_session': handler.rpc_session,
            'server_config': handler.config,
            'session': session
        },
        variable_values=query_vars
    )
    errors = None
    if result.errors:
        errors = []
        for error in result.errors:
            if hasattr(error, 'message'):
                errors.append(error.message)
            elif hasattr(error, 'args') and error.args:
                errors.append(str(error.args[0]))
            else:
                errors.append(repr(error))
    return {'data': result.data, 'errors': errors}
|
Execute a GraphQL query and return the results. If the query fails to
execute the errors returned are populated in the **errors** key of the
results dictionary. If the query executes successfully the returned data
is available in the **data** key of the results dictionary.
:param str query: The GraphQL query to execute.
:param dict query_vars: Any variables needed by the *query*.
:return: The results of the query as a dictionary.
:rtype: dict
|
rpc_graphql
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
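A hedged usage sketch; `handler` and `session` are assumed to come from the RPC plumbing, and the query string is a placeholder since the available fields are defined entirely by graphql_schema.
# the 'version' field here is an assumption about the schema, not a documented
# part of it; failures are reported in the 'errors' key rather than raised
result = rpc_graphql(handler, session, 'query { version }')
if result['errors']:
    print('query failed: ' + '; '.join(result['errors']))
else:
    print(result['data'])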
def rpc_ssl_letsencrypt_certbot_version(handler):
    """
    Find the certbot binary and retrieve its version information. If the
    certbot binary could not be found, ``None`` is returned.

    .. versionadded:: 1.14.0

    :return: The version of certbot.
    :rtype: str
    """
    bin_path = letsencrypt.get_certbot_bin_path(handler.config)
    if bin_path is None:
        return None
    results = startup.run_process((bin_path, '--version'))
    match = re.match(r'^certbot (?P<version>\d+\.\d+\.\d+)$', results.stdout)
    if match is None:
        return None
    return match.group('version')
|
Find the certbot binary and retrieve its version information. If the
certbot binary could not be found, ``None`` is returned.
.. versionadded:: 1.14.0
:return: The version of certbot.
:rtype: str
|
rpc_ssl_letsencrypt_certbot_version
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_letsencrypt_issue(handler, hostname, load=True):
    """
    Issue a certificate with Let's Encrypt. This operation can fail for a wide
    variety of reasons; check the ``message`` key of the returned dictionary for
    a string description of what occurred. Successful operation requires that
    the certbot utility be installed and that the server's Let's Encrypt data path
    is configured.

    .. versionadded:: 1.14.0

    :param str hostname: The hostname of the certificate to issue.
    :param bool load: Whether or not to load the certificate once it has been issued.
    :return: A dictionary containing the results of the operation.
    :rtype: dict
    """
    config = handler.config
    result = {'success': False}
    letsencrypt_config = config.get_if_exists('server.letsencrypt', {})
    # step 1: ensure that a letsencrypt configuration is available
    data_path = letsencrypt_config.get('data_path')
    if not data_path:
        result['message'] = 'Let\'s Encrypt is not configured for use.'
        return result
    if not os.path.isdir(data_path):
        rpc_logger.info('creating the letsencrypt data directory')
        os.mkdir(data_path)
    # step 2: ensure that SSL is enabled already
    if not _ssl_is_enabled(handler):
        result['message'] = 'Can not issue certificates when SSL is not in use.'
        return result
    if not advancedhttpserver.g_ssl_has_server_sni:
        result['message'] = 'Can not issue certificates when SNI is not available.'
        return result
    # step 3: ensure that the certbot utility is available
    bin_path = letsencrypt_config.get('certbot_path') or startup.which('certbot')
    if not bin_path:
        result['message'] = 'Can not issue certificates without the certbot utility.'
        return result
    # step 4: ensure the hostname looks legit (TM) and hasn't already been issued
    if re.match(r'^[a-z0-9][a-z0-9-]*(\.[a-z0-9-]+)+$', hostname, flags=re.IGNORECASE) is None:
        result['message'] = 'Can not issue certificates for invalid hostnames.'
        return result
    if letsencrypt.get_sni_hostname_config(hostname, config):
        result['message'] = 'The specified hostname already has the necessary files.'
        return result
    # step 5: determine the web_root path for this hostname and create it if necessary
    web_root = config.get('server.web_root')
    if config.get('server.vhost_directories'):
        web_root = os.path.join(web_root, hostname)
        if not os.path.isdir(web_root):
            rpc_logger.info('vhost directory does not exist for hostname: ' + hostname)
            os.mkdir(web_root)
    # step 6: issue the certificate with certbot, this starts the subprocess and may take a few seconds
    with _lend_semaphore(handler):
        status = letsencrypt.certbot_issue(web_root, hostname, bin_path=bin_path, unified_directory=data_path)
    if status != os.EX_OK:
        result['message'] = 'Failed to issue the certificate.'
        return result
    # step 7: ensure the necessary files were created
    sni_config = letsencrypt.get_sni_hostname_config(hostname, config)
    if sni_config is None:
        result['message'] = 'The certificate files were not generated.'
        return result
    # step 8: store the data in the database so it can be loaded next time the server starts
    if load:
        handler.server.add_sni_cert(hostname, ssl_certfile=sni_config.certfile, ssl_keyfile=sni_config.keyfile)
    else:
        # fixed: the key file argument previously received the certificate path twice
        letsencrypt.set_sni_hostname(hostname, sni_config.certfile, sni_config.keyfile, enabled=False)
    result['success'] = True
    result['message'] = 'The operation completed successfully.'
    return result
|
Issue a certificate with Let's Encrypt. This operation can fail for a wide
variety of reasons; check the ``message`` key of the returned dictionary for
a string description of what occurred. Successful operation requires that
the certbot utility be installed and that the server's Let's Encrypt data path
is configured.
.. versionadded:: 1.14.0
:param str hostname: The hostname of the certificate to issue.
:param bool load: Whether or not to load the certificate once it has been issued.
:return: A dictionary containing the results of the operation.
:rtype: dict
|
rpc_ssl_letsencrypt_issue
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
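Because failures are reported through the returned dictionary rather than raised, callers are expected to inspect it; a sketch assuming a `handler` from the RPC layer and an illustrative hostname:
result = rpc_ssl_letsencrypt_issue(handler, 'phish.example.com', load=True)
if not result['success']:
    rpc_logger.warning('certificate issuance failed: ' + result['message'])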
def rpc_ssl_sni_hostnames_get(handler):
    """
    Get the hostnames that have available Server Name Indication (SNI)
    configurations for use with SSL.

    .. versionadded:: 1.14.0

    :return: A dictionary keyed by hostnames with values of dictionaries containing additional metadata.
    :rtype: dict
    """
    if not advancedhttpserver.g_ssl_has_server_sni:
        rpc_logger.warning('can not enumerate SNI hostnames when SNI is not available')
        return
    hostnames = {}
    for hostname, sni_config in letsencrypt.get_sni_hostnames(handler.config).items():
        hostnames[hostname] = {'enabled': sni_config.enabled}
    return hostnames
|
Get the hostnames that have available Server Name Indication (SNI)
configurations for use with SSL.
.. versionadded:: 1.14.0
:return: A dictionary keyed by hostnames with values of dictionaries containing additional metadata.
:rtype: dict
|
rpc_ssl_sni_hostnames_get
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_sni_hostnames_load(handler, hostname):
    """
    Load the SNI configuration for the specified *hostname*, effectively
    enabling it. If SSL is not enabled, SNI is not available, or the necessary
    data files are not available, this function returns ``False``.

    .. versionadded:: 1.14.0

    :param str hostname: The hostname to configure SSL for.
    :return: Returns ``True`` only if the SNI configuration for *hostname* was
        either able to be loaded or was already loaded.
    :rtype: bool
    """
    if not _ssl_is_enabled(handler):
        rpc_logger.warning('can not add an SNI hostname when SSL is not in use')
        return False
    if not advancedhttpserver.g_ssl_has_server_sni:
        rpc_logger.warning('can not add an SNI hostname when SNI is not available')
        return False
    for sni_cert in handler.server.get_sni_certs():
        if sni_cert.hostname == hostname:
            rpc_logger.info('ignoring directive to add an SNI hostname that already exists')
            return True
    sni_config = letsencrypt.get_sni_hostname_config(hostname, handler.config)
    if not sni_config:
        rpc_logger.warning('can not add an SNI hostname without the necessary files')
        return False
    handler.server.add_sni_cert(hostname, sni_config.certfile, sni_config.keyfile)
    return True
|
Load the SNI configuration for the specified *hostname*, effectively
enabling it. If SSL is not enabled, SNI is not available, or the necessary
data files are not available, this function returns ``False``.
.. versionadded:: 1.14.0
:param str hostname: The hostname to configure SSL for.
:return: Returns ``True`` only if the SNI configuration for *hostname* was
either able to be loaded or was already loaded.
:rtype: bool
|
rpc_ssl_sni_hostnames_load
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_sni_hostnames_unload(handler, hostname):
    """
    Unload the SNI configuration for the specified *hostname*, effectively
    disabling it. If SNI is not available, or the specified configuration was
    not already loaded, this function returns ``False``.

    .. versionadded:: 1.14.0

    :param str hostname: The hostname to configure SSL for.
    :return: Returns ``True`` only if the SNI configuration for *hostname* was unloaded.
    :rtype: bool
    """
    if not advancedhttpserver.g_ssl_has_server_sni:
        rpc_logger.warning('can not remove an SNI hostname when SNI is not available')
        return False
    for sni_cert in handler.server.get_sni_certs():
        if sni_cert.hostname == hostname:
            break
    else:
        rpc_logger.warning('can not remove an SNI hostname that does not exist')
        return False
    handler.server.remove_sni_cert(sni_cert.hostname)
    return True
|
Unload the SNI configuration for the specified *hostname*, effectively
disabling it. If SNI is not available, or the specified configuration was
not already loaded, this function returns ``False``.
.. versionadded:: 1.14.0
:param str hostname: The hostname to configure SSL for.
:return: Returns ``True`` only if the SNI configuration for *hostname* was unloaded.
:rtype: bool
|
rpc_ssl_sni_hostnames_unload
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def rpc_ssl_status(handler):
    """
    Get information regarding the status of SSL on the server. This method
    returns a dictionary with keys describing whether or not SSL is enabled on
    one or more interfaces, and whether or not the server possesses SNI
    support. For details regarding which addresses are using SSL, see the
    :py:func:`~rpc_config_get` method.

    .. versionadded:: 1.14.0

    :return: A dictionary with SSL status information.
    :rtype: dict
    """
    status = {
        'enabled': _ssl_is_enabled(handler),
        'has-letsencrypt': letsencrypt.get_certbot_bin_path(handler.config) is not None,
        'has-sni': advancedhttpserver.g_ssl_has_server_sni
    }
    return status
|
Get information regarding the status of SSL on the server. This method
returns a dictionary with keys describing whether or not SSL is enabled on
one or more interfaces, and whether or not the server possesses SNI
support. For details regarding which addresses are using SSL, see the
:py:func:`~rpc_config_get` method.
.. versionadded:: 1.14.0
:return: A dictionary with SSL status information.
:rtype: dict
|
rpc_ssl_status
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/server_rpc.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/server_rpc.py
|
BSD-3-Clause
|
def embed_youtube_video(video_id, autoplay=True, enable_js=False, start=0, end=None):
    """
    A Jinja function to embed a video into a web page using YouTube's
    `iframe API <https://developers.google.com/youtube/iframe_api_reference>`_.
    In order to enable a training button after the video has ended, the
    youtube.js file needs to be included and *enable_js* must be set to True. If
    *start* or *end* are specified as strings, they must be in a format suitable
    to be parsed by :py:func:`~smoke_zephyr.utilities.parse_timespan`.

    :param str video_id: The id of the YouTube video to embed.
    :param bool autoplay: Start playing the video as soon as the page loads.
    :param bool enable_js: Enable the Javascript API.
    :param start: The time offset at which the video should begin playing.
    :type start: int, str
    :param end: The time offset at which the video should stop playing.
    :type end: int, str
    """
    autoplay = int(autoplay)
    yt_url = "https://www.youtube.com/embed/{0}?autoplay={1}&modestbranding=1&rel=0&showinfo=0".format(video_id, autoplay)
    if enable_js:
        yt_url += '&enablejsapi=1'
    if start:
        if isinstance(start, str):
            start = smoke_zephyr.utilities.parse_timespan(start)
        yt_url += "&start={0}".format(start)
    if end:
        if isinstance(end, str):
            end = smoke_zephyr.utilities.parse_timespan(end)
        yt_url += "&end={0}".format(end)
    iframe_tag = "<iframe id=\"ytplayer\" type=\"text/html\" width=\"720\" height=\"405\" src=\"{0}\" frameborder=\"0\" allowfullscreen></iframe>".format(yt_url)
    return markupsafe.Markup(iframe_tag)
|
A Jinja function to embed a video into a web page using YouTube's
`iframe API <https://developers.google.com/youtube/iframe_api_reference>`_.
In order to enable a training button after the video has ended, the
youtube.js file needs to be included and *enable_js* must be set to True. If
*start* or *end* are specified as strings, they must be in a format suitable
to be parsed by :py:func:`~smoke_zephyr.utilities.parse_timespan`.
:param str video_id: The id of the YouTube video to embed.
:param bool autoplay: Start playing the video as soon as the page loads.
:param bool enable_js: Enable the Javascript API.
:param start: The time offset at which the video should begin playing.
:type start: int, str
:param end: The time offset at which the video should stop playing.
:type end: int, str
|
embed_youtube_video
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/template_extras.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/template_extras.py
|
BSD-3-Clause
|
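One way to exercise this function outside the server's own template environment is a minimal jinja2 sketch; the environment setup here is illustrative, not how the server wires it up.
import jinja2

env = jinja2.Environment()
env.globals['embed_youtube_video'] = embed_youtube_video
# '1m30s' is handed to smoke_zephyr's parse_timespan, yielding 90 seconds
template = env.from_string("{{ embed_youtube_video('dQw4w9WgXcQ', autoplay=False, start='1m30s') }}")
print(template.render())  # emits the <iframe> tag with start=90 in the URL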
def make_redirect_page(url, title='Automatic Redirect'):
    """
    A Jinja function which will create an HTML page that will automatically
    redirect the viewer to a different url.

    :param str url: The URL to redirect the user to.
    :param str title: The title to use in the resulting HTML page.
    """
    title = html.escape(title, quote=True)
    url = html.escape(url, quote=True)
    page = []
    page.append('<!DOCTYPE html>')
    page.append('<html lang="en-US">')
    page.append('  <head>')
    page.append("    <title>{0}</title>".format(title))
    page.append("    <meta http-equiv=\"refresh\" content=\"0;url={0}\" />".format(url))
    page.append('  </head>')
    page.append('  <body>')
    page.append("    <p>The content you are looking for has been moved. If you are not redirected automatically then <a href=\"{0}\">click here</a> to proceed.</p>".format(url))
    page.append('  </body>')
    page.append('</html>')
    page = '\n'.join(page)
    return markupsafe.Markup(page)
|
A Jinja function which will create an HTML page that will automatically
redirect the viewer to a different url.
:param str url: The URL to redirect the user to.
:param str title: The title to use in the resulting HTML page.
|
make_redirect_page
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/template_extras.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/template_extras.py
|
BSD-3-Clause
|
def __init__(self, handler, manager):
    """
    :param handler: The request handler that should be used by this socket.
    :type handler: :py:class:`advancedhttpserver.RequestHandler`
    :param manager: The manager that this event socket should register with.
    :type manager: :py:class:`.WebSocketsManager`
    """
    handler.connection.settimeout(None)
    self._subscriptions = {}
    self.rpc_session = handler.rpc_session
    if self.rpc_session.event_socket is not None:
        self.rpc_session.event_socket.close()
    self.rpc_session.event_socket = self
    manager.add(self)
    self._manager_ref = weakref.ref(manager)
    super(EventSocket, self).__init__(handler)
|
:param handler: The request handler that should be used by this socket.
:type handler: :py:class:`advancedhttpserver.RequestHandler`
:param manager: The manager that this event socket should register with.
:type manager: :py:class:`.WebSocketsManager`
|
__init__
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def is_subscribed(self, event_id, event_type):
    """
    Check if the client is currently subscribed to the specified server event.

    :param str event_id: The identifier of the event to check.
    :param str event_type: A sub-type for the corresponding event.
    :return: Whether or not the client is subscribed to the event.
    :rtype: bool
    """
    if event_id not in self._subscriptions:
        return False
    return event_type in self._subscriptions[event_id].event_types
|
Check if the client is currently subscribed to the specified server event.
:param str event_id: The identifier of the event to check.
:param str event_type: A sub-type for the corresponding event.
:return: Whether or not the client is subscribed to the event.
:rtype: bool
|
is_subscribed
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def publish(self, event):
    """
    Publish the event by sending the relevant information to the client.
    If the client has not requested to receive the information through a
    subscription, then no data will be sent.

    :param event: The object representing the data to be published.
    :type event: :py:class:`.Event`
    """
    subscription = self._subscriptions.get(event.event_id)
    if subscription is None:
        return
    if event.event_type not in subscription.event_types:
        return
    summaries = []
    for source in event.sources:
        if isinstance(source, db_models.Base) and not source.session_has_permissions('r', self.rpc_session):
            continue
        summary = dict((attribute, getattr(source, attribute, None)) for attribute in subscription.attributes)
        summaries.append(summary)
    if not summaries:
        return
    msg = {
        'event': {
            'id': event.event_id,
            'type': event.event_type,
            'objects': summaries
        }
    }
    self.logger.debug("publishing event {0} (type: {1}) with {2} objects".format(event.event_id, event.event_type, len(summaries)))
    self.send_message_text(serializers.JSON.dumps(msg, pretty=False))
|
Publish the event by sending the relevant information to the client.
If the client has not requested to receive the information through a
subscription, then no data will be sent.
:param event: The object representing the data to be published.
:type event: :py:class:`.Event`
|
publish
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def __init__(self, config, job_manager):
    """
    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :param job_manager: A job manager instance that can be used to schedule tasks.
    :type job_manager: :py:class:`smoke_zephyr.job.JobManager`
    """
    self.config = config
    self.web_sockets = []
    self.job_manager = job_manager
    self._ping_job = job_manager.job_add(self.ping_all, seconds=30)
    self._work_queue = queue.Queue()
    self._worker_thread = threading.Thread(target=self._worker_routine)
    self._worker_thread.start()
    signals.db_session_deleted.connect(self._sig_db_deleted)
    signals.db_session_inserted.connect(self._sig_db_inserted)
    signals.db_session_updated.connect(self._sig_db_updated)
|
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:param job_manager: A job manager instance that can be used to schedule tasks.
:type job_manager: :py:class:`smoke_zephyr.job.JobManager`
|
__init__
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def dispatch(self, handler):
    """
    A method that is suitable for use as a
    :py:attr:`~advancedhttpserver.RequestHandler.web_socket_handler`.

    :param handler: The current request handler instance.
    :type handler: :py:class:`~king_phisher.server.server.KingPhisherRequestHandler`
    """
    if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
        return
    prefix = '/'
    if self.config.get('server.vhost_directories'):
        prefix += handler.vhost + '/'
    request_path = handler.path
    if request_path.startswith(prefix):
        request_path = request_path[len(prefix):]
    if request_path == '_/ws/events/json':
        EventSocket(handler, self)
        return
    handler.respond_not_found()
    return
|
A method that is suitable for use as a
:py:attr:`~advancedhttpserver.RequestHandler.web_socket_handler`.
:param handler: The current request handler instance.
:type handler: :py:class:`~king_phisher.server.server.KingPhisherRequestHandler`
|
dispatch
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def ping_all(self):
    """
    Ping all of the connected web sockets to ensure they stay alive. This
    method is automatically executed periodically through a job added when
    the manager is initialized.
    """
    disconnected = collections.deque()
    for web_socket in self.web_sockets:
        if web_socket.connected:
            try:
                web_socket.ping()
            except Exception:
                self.logger.error('error occurred while pinging the web socket, closing it', exc_info=True)
                web_socket.close()
            else:
                continue
        disconnected.append(web_socket)
    for web_socket in disconnected:
        self.logger.debug('closing a disconnected web socket')
        self.web_sockets.remove(web_socket)
|
Ping all of the connected web sockets to ensure they stay alive. This
method is automatically executed periodically through a job added when
the manager is initialized.
|
ping_all
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def stop(self):
    """
    Shutdown the manager and clean up the resources it has allocated.
    """
    self.job_manager.job_delete(self._ping_job)
    for web_socket in self.web_sockets:
        if web_socket.connected:
            web_socket.close()
    self.web_sockets = []
    self._work_queue.put(None)
    self._worker_thread.join()
|
Shutdown the manager and clean up the resources it has allocated.
|
stop
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_sockets.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_sockets.py
|
BSD-3-Clause
|
def get_hostnames(config):
    """
    List the hostnames that are configured for this server instance. This list
    is generated by first checking the server's configuration for the
    ``hostnames`` option. Then if ``vhost_directories`` is enabled, the webroot
    is checked for additional values.

    .. note::
        This function makes no attempt to validate these values, they are
        strictly what have been configured for use.

    .. versionadded:: 1.13.0

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: A tuple of the enumerated hostnames.
    :rtype: tuple
    """
    hostnames = config.get_if_exists('server.hostnames', [])
    hostnames.extend(get_vhost_directories(config) or ())
    hostnames.extend(letsencrypt.get_sni_hostnames(config).keys())
    hostnames = smoke_zephyr.utilities.unique(hostnames)
    return tuple(sorted(hostnames))
|
List the hostnames that are configured for this server instance. This list
is generated by first checking the server's configuration for the
``hostnames`` option. Then if ``vhost_directories`` is enabled, the webroot
is checked for additional values.
.. note::
This function makes no attempt to validate these values, they are
strictly what have been configured for use.
.. versionadded:: 1.13.0
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:return: A tuple of the enumerated hostnames.
:rtype: tuple
|
get_hostnames
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_tools.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_tools.py
|
BSD-3-Clause
|
def get_vhost_directories(config):
    """
    List the hostnames that are configured through the Virtual Host directories.
    If the server option ``vhost_directories`` is disabled, this function
    returns ``None``.

    .. versionadded:: 1.13.0

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: A tuple of the enumerated virtual hostname directories.
    :rtype: tuple
    """
    if not config.get('server.vhost_directories'):
        return None
    web_root = config.get('server.web_root')
    directories = [entry for entry in os.listdir(web_root) if os.path.isdir(os.path.join(web_root, entry))]
    return tuple(sorted(directories))
|
List the hostnames that are configured through the Virtual Host directories.
If the server option ``vhost_directories`` is disabled, this function
returns ``None``.
.. versionadded:: 1.13.0
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:return: A tuple of the enumerated virtual hostname directories.
:rtype: tuple
|
get_vhost_directories
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/web_tools.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/web_tools.py
|
BSD-3-Clause
|
def _ex_config_logging(arguments, config, console_handler):
    """
    If a setting is configured improperly, this will terminate execution via
    :py:func:`sys.exit`.

    :return: The path to a log file if one is in use.
    :rtype: str
    """
    default_log_level = min(
        getattr(logging, (arguments.loglvl or constants.DEFAULT_LOG_LEVEL)),
        getattr(logging, config.get_if_exists('logging.level', 'critical').upper())
    )
    log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL')
    file_path = None
    if config.has_option('logging.file'):
        options = config.get('logging.file')
        for _ in range(1):  # single-iteration loop so 'break' can skip the rest of this block
            default_format = '%(asctime)s %(name)-50s %(levelname)-8s %(message)s'
            if isinstance(options, dict):  # new style
                if not options.get('enabled', True):
                    break
                if 'path' not in options:
                    color.print_error('logging.file is missing required key \'path\'')
                    sys.exit(os.EX_CONFIG)
                if 'level' not in options:
                    color.print_error('logging.file is missing required key \'level\'')
                    sys.exit(os.EX_CONFIG)
                file_path = options['path']
                formatter = logging.Formatter(options.get('format', default_format))
                if not options['level'].upper() in log_levels:
                    color.print_error('logging.file.level is invalid, must be one of: ' + ', '.join(log_levels))
                    sys.exit(os.EX_CONFIG)
                log_level = getattr(logging, options['level'].upper())
                root = options.get('root', '')
            elif isinstance(options, str):  # old style
                file_path = options
                formatter = logging.Formatter(default_format)
                log_level = default_log_level
                root = ''
            else:
                break
            file_handler = logging.FileHandler(file_path)
            file_handler.setFormatter(formatter)
            logging.getLogger(root).addHandler(file_handler)
            file_handler.setLevel(log_level)
    if config.has_option('logging.console'):
        options = config.get('logging.console')
        for _ in range(1):  # single-iteration loop so 'break' can skip the rest of this block
            if isinstance(options, dict):  # new style
                if not options.get('enabled', True):
                    break
                if 'format' in options:
                    console_handler.setFormatter(color.ColoredLogFormatter(options['format']))
                if arguments.loglvl is None and 'level' in options:
                    log_level = str(options.get('level', '')).upper()
                    if log_level not in log_levels:
                        color.print_error('logging.console.level is invalid, must be one of: ' + ', '.join(log_levels))
                        sys.exit(os.EX_CONFIG)
                    console_handler.setLevel(getattr(logging, log_level))
            elif isinstance(options, str):  # old style
                console_handler.setLevel(default_log_level)
    return file_path
|
If a setting is configured improperly, this will terminate execution via
:py:func:`sys.exit`.
:return: The path to a log file if one is in use.
:rtype: str
|
_ex_config_logging
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/__main__.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/__main__.py
|
BSD-3-Clause
|
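For reference, a new-style ``logging.file`` section can be expressed as the following dictionary (values illustrative; only ``path`` and ``level`` are required, the rest fall back to the defaults shown in the code above):
options = {
    'enabled': True,  # optional, defaults to True
    'path': '/var/log/king-phisher.log',  # required
    'level': 'INFO',  # required, one of DEBUG, INFO, WARNING, ERROR, FATAL
    'format': '%(asctime)s %(name)-50s %(levelname)-8s %(message)s',  # optional
    'root': ''  # optional, the name of the logger to attach the handler to
}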
def clear_database():
    """
    Delete all data from all tables in the connected database. The database
    schema will remain unaffected.

    .. warning::
        This action can not be reversed and there is no confirmation before it
        takes place.
    """
    engine = Session.connection().engine
    with contextlib.closing(engine.connect()) as connection:
        transaction = connection.begin()
        for table in reversed(models.metadata.sorted_tables):
            connection.execute(table.delete())
        transaction.commit()
|
Delete all data from all tables in the connected database. The database
schema will remain unaffected.
.. warning::
This action can not be reversed and there is no confirmation before it
takes place.
|
clear_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def export_database(target_file):
    """
    Export the contents of the database using SQLAlchemy's serialization. This
    creates an archive file containing all of the tables and their data. The
    resulting export can be imported into another supported database so long
    as the :py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` is the
    same.

    :param str target_file: The file to write the export to.
    """
    session = Session()
    kpdb = archive.ArchiveFile(target_file, 'w')
    kpdb.metadata['database-schema'] = models.SCHEMA_VERSION
    for table in models.metadata.sorted_tables:
        table_name = table.name
        model = models.database_table[table_name].model
        kpdb.add_data('tables/' + table_name, sqlalchemy.ext.serializer.dumps(session.query(model).all()))
    kpdb.close()
|
Export the contents of the database using SQLAlchemy's serialization. This
creates an archive file containing all of the tables and their data. The
resulting export can be imported into another supported database so long
as the :py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` is the
same.
:param str target_file: The file to write the export to.
|
export_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def import_database(target_file, clear=True):
    """
    Import the contents of a serialized database from an archive previously
    created with the :py:func:`.export_database` function. The current
    :py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` must be the
    same as the exported archive.

    .. warning::
        This will by default delete the contents of the current database in
        accordance with the *clear* parameter. If *clear* is not
        specified and objects in the database and import share an ID, they will
        be merged.

    :param str target_file: The database archive file to import from.
    :param bool clear: Whether or not to delete the contents of the
        existing database before importing the new data.
    """
    kpdb = archive.ArchiveFile(target_file, 'r')
    schema_version = kpdb.metadata['database-schema']
    if schema_version != models.SCHEMA_VERSION:
        raise errors.KingPhisherDatabaseError("incompatible database schema versions ({0} vs {1})".format(schema_version, models.SCHEMA_VERSION))
    if clear:
        clear_database()
    session = Session()
    for table in models.metadata.sorted_tables:
        table_data = kpdb.get_data('tables/' + table.name)
        for row in sqlalchemy.ext.serializer.loads(table_data):
            session.merge(row)
    session.commit()
    kpdb.close()
|
Import the contents of a serialized database from an archive previously
created with the :py:func:`.export_database` function. The current
:py:data:`~king_phisher.server.database.models.SCHEMA_VERSION` must be the
same as the exported archive.
.. warning::
This will by default delete the contents of the current database in
accordance with the *clear* parameter. If *clear* is not
specified and objects in the database and import share an ID, they will
be merged.
:param str target_file: The database archive file to import from.
:param bool clear: Whether or not to delete the contents of the
existing database before importing the new data.
|
import_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
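A round-trip sketch tying export_database and import_database together; the archive path is illustrative.
# both sides of the round trip must share the same models.SCHEMA_VERSION
export_database('/tmp/king-phisher-backup.kpdb')
import_database('/tmp/king-phisher-backup.kpdb', clear=True)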
def get_metadata(key, session=None):
    """
    Retrieve a piece of metadata regarding the King Phisher database.

    :param str key: The name of the data to retrieve.
    :param session: The session to use to retrieve the value.
    :return: The deserialized value from storage.
    """
    if not isinstance(key, str):
        raise TypeError('key must be a str instance')
    close_session = session is None
    session = (session or Session())
    obj = session.query(models.StorageData).filter_by(namespace=_metadata_namespace, key=key).first()
    if obj is None:
        raise KeyError(key)
    value = obj.value
    if value is not None:
        value = _metadata_serializer.loads(value)
    if close_session:
        session.commit()
        session.close()
    return value
|
Retrieve a piece of metadata regarding the King Phisher database.
:param str key: The name of the data to retrieve.
:param session: The session to use to retrieve the value.
:return: The deserialized value from storage.
|
get_metadata
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def get_row_by_id(session, table, row_id):
    """
    Retrieve a database row from the specified table by its unique id.

    :param session: The database session to use for the query.
    :type session: `.Session`
    :param table: The table object or the name of the database table where the row resides.
    :param row_id: The id of the row to retrieve.
    :return: The object representing the specified row or None if it does not exist.
    """
    # a string table name must be resolved to its model class; issubclass()
    # would raise TypeError when handed a non-class argument such as a str
    if isinstance(table, str):
        table = models.database_tables[table].model
    query = session.query(table)
    query = query.filter_by(id=row_id)
    result = query.first()
    return result
|
Retrieve a database row from the specified table by its unique id.
:param session: The database session to use for the query.
:type session: `.Session`
:param table: The table object or the name of the database table where the row resides.
:param row_id: The id of the row to retrieve.
:return: The object representing the specified row or None if it does not exist.
|
get_row_by_id
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
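A short usage sketch; the row id is illustrative and 'campaigns' is one of the schema's table names.
session = Session()
campaign = get_row_by_id(session, 'campaigns', 1)
if campaign is None:
    print('no campaign with that id')
session.close()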
def normalize_connection_url(connection_url):
    """
    Normalize a connection url by performing any conversions necessary for it to
    be used with the database API.

    :param str connection_url: The connection url to normalize.
    :return: The normalized connection url.
    :rtype: str
    """
    if connection_url == ':memory:':
        connection_url = 'sqlite://'
    elif os.path.isfile(connection_url) or os.path.isdir(os.path.dirname(connection_url)):
        connection_url = 'sqlite:///' + os.path.abspath(connection_url)
    return connection_url
|
Normalize a connection url by performing any conversions necessary for it to
be used with the database API.
:param str connection_url: The connection url to normalize.
:return: The normalized connection url.
:rtype: str
|
normalize_connection_url
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
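The behavior can be summarized with a few examples (paths illustrative):
normalize_connection_url(':memory:')
# -> 'sqlite://', an in-memory sqlite database
normalize_connection_url('data/king-phisher.db')
# -> 'sqlite:///' + os.path.abspath('data/king-phisher.db'), provided the
#    file or its parent directory exists
normalize_connection_url('postgresql://user@localhost/king_phisher')
# -> returned unchanged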
def init_alembic(engine, schema_version):
    """
    Creates the alembic_version table and sets the value of the table according
    to the specified schema version.

    :param engine: The engine used to connect to the database.
    :type engine: :py:class:`sqlalchemy.engine.Engine`
    :param int schema_version: The MetaData schema_version to set the alembic version to.
    """
    pattern = re.compile(r'[a-f0-9]{10,16}_schema_v\d+\.py')
    alembic_revision = None
    alembic_directory = find.data_directory('alembic')
    if not alembic_directory:
        raise errors.KingPhisherDatabaseError('cannot find the alembic data directory')
    alembic_versions_files = os.listdir(os.path.join(alembic_directory, 'versions'))
    for file in alembic_versions_files:
        if not pattern.match(file):
            continue
        if not file.endswith('_schema_v' + str(schema_version) + '.py'):
            continue
        alembic_revision = file.split('_', 1)[0]
        break
    if not alembic_revision:
        raise errors.KingPhisherDatabaseError("cannot find current alembic version for schema version {0}".format(schema_version))
    alembic_metadata = sqlalchemy.MetaData(engine)
    alembic_table = sqlalchemy.Table(
        'alembic_version',
        alembic_metadata,
        sqlalchemy.Column(
            'version_num',
            sqlalchemy.String,
            primary_key=True,
            nullable=False
        )
    )
    alembic_metadata.create_all()
    alembic_version_entry = alembic_table.insert().values(version_num=alembic_revision)
    engine.connect().execute(alembic_version_entry)
    logger.info("alembic_version table initialized to {0}".format(alembic_revision))
|
Creates the alembic_version table and sets the value of the table according
to the specified schema version.
:param engine: The engine used to connect to the database.
:type engine: :py:class:`sqlalchemy.engine.Engine`
:param int schema_version: The MetaData schema_version to set the alembic version to.
|
init_alembic
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def init_database(connection_url, extra_init=False):
    """
    Create and initialize the database engine. This must be done before the
    session object can be used. This will also attempt to perform any updates to
    the database schema if the backend supports such operations.

    :param str connection_url: The url for the database connection.
    :param bool extra_init: Run optional extra dbms-specific initialization logic.
    :return: The initialized database engine.
    """
    connection_url = normalize_connection_url(connection_url)
    connection_url = sqlalchemy.engine.url.make_url(connection_url)
    logger.info("initializing database connection with driver {0}".format(connection_url.drivername))
    if connection_url.drivername == 'sqlite':
        engine = sqlalchemy.create_engine(connection_url, connect_args={'check_same_thread': False}, poolclass=sqlalchemy.pool.StaticPool)
        sqlalchemy.event.listens_for(engine, 'begin')(lambda conn: conn.execute('BEGIN'))
    elif connection_url.drivername == 'postgresql':
        if extra_init:
            init_database_postgresql(connection_url)
        engine = sqlalchemy.create_engine(connection_url, connect_args={'client_encoding': 'utf8'})
    else:
        raise errors.KingPhisherDatabaseError('only sqlite and postgresql database drivers are supported')
    try:
        Session.remove()
        Session.configure(bind=engine)
        inspector = sqlalchemy.inspect(engine)
    except sqlalchemy.exc.OperationalError as error:
        if error.args:
            match = re.match(r'\(psycopg2\.OperationalError\) FATAL:\s+\w+ authentication failed for user \"(?P<username>\w+)\"$', error.args[0])
            if match:
                raise errors.KingPhisherDatabaseAuthenticationError('database initialization failed', username=match.group('username')) from None
        logger.debug('encountered a sqlalchemy OperationalError while initializing the database', exc_info=True)
        raise errors.KingPhisherDatabaseError('database initialization failed') from error
    if 'campaigns' not in inspector.get_table_names():
        logger.debug('campaigns table not found, creating all new tables')
        try:
            models.Base.metadata.create_all(engine)
        except sqlalchemy.exc.SQLAlchemyError as error:
            error_lines = (line.strip() for line in error.message.split('\n'))
            raise errors.KingPhisherDatabaseError('SQLAlchemyError: ' + ' '.join(error_lines).strip())
    schema_version = get_schema_version(engine)
    logger.debug("current database schema version: {0} ({1})".format(schema_version, ('latest' if schema_version == models.SCHEMA_VERSION else 'obsolete')))
    if 'alembic_version' not in inspector.get_table_names():
        logger.debug('alembic version table not found, attempting to create and set version')
        init_alembic(engine, schema_version)
    if schema_version > models.SCHEMA_VERSION:
        raise errors.KingPhisherDatabaseError('the database schema is for a newer version, automatic downgrades are not supported')
    elif schema_version < models.SCHEMA_VERSION:
        alembic_config_file = find.data_file('alembic.ini')
        if not alembic_config_file:
            raise errors.KingPhisherDatabaseError('cannot find the alembic.ini configuration file')
        alembic_directory = find.data_directory('alembic')
        if not alembic_directory:
            raise errors.KingPhisherDatabaseError('cannot find the alembic data directory')
        config = alembic.config.Config(alembic_config_file)
        config.config_file_name = alembic_config_file
        config.set_main_option('script_location', alembic_directory)
        config.set_main_option('skip_logger_config', 'True')
        config.set_main_option('sqlalchemy.url', str(connection_url))
        logger.warning("automatically updating the database schema from version {0} to {1}".format(schema_version, models.SCHEMA_VERSION))
        try:
            alembic.command.upgrade(config, 'head')
        except Exception as error:
            logger.critical("database schema upgrade failed with exception: {0}.{1} {2}".format(error.__class__.__module__, error.__class__.__name__, getattr(error, 'message', '')).rstrip(), exc_info=True)
            raise errors.KingPhisherDatabaseError('failed to upgrade to the latest database schema')
        logger.info("successfully updated the database schema from version {0} to {1}".format(schema_version, models.SCHEMA_VERSION))
        # reset it because it may have been altered by alembic
        Session.remove()
        Session.configure(bind=engine)
    set_metadata('database_driver', connection_url.drivername)
    set_metadata('last_started', datetime.datetime.utcnow())
    set_metadata('schema_version', models.SCHEMA_VERSION)
    logger.debug("connected to {0} database: {1}".format(connection_url.drivername, connection_url.database))
    signals.db_initialized.send(connection_url)
    return engine
|
Create and initialize the database engine. This must be done before the
session object can be used. This will also attempt to perform any updates to
the database schema if the backend supports such operations.
:param str connection_url: The url for the database connection.
:param bool extra_init: Run optional extra dbms-specific initialization logic.
:return: The initialized database engine.
|
init_database
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
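A minimal sketch of bringing up an engine for testing, assuming this module is imported as `manager` (the alias is hypothetical):
# an in-memory sqlite database keeps the sketch self-contained
engine = manager.init_database(':memory:')
session = manager.Session()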
def init_database_postgresql(connection_url):
    """
    Perform additional initialization checks and operations for a PostgreSQL
    database. If the database is hosted locally this will ensure that the
    service is currently running and start it if it is not. Additionally if the
    specified database or user do not exist, they will be created.

    :param connection_url: The url for the PostgreSQL database connection.
    :type connection_url: :py:class:`sqlalchemy.engine.url.URL`
    """
    if not ipaddress.is_loopback(connection_url.host):
        return
    is_sanitary = lambda s: re.match(r'^[a-zA-Z0-9_]+$', s) is not None
    systemctl_bin = smoke_zephyr.utilities.which('systemctl')
    if systemctl_bin is None:
        logger.info('postgresql service status check failed (could not find systemctl)')
    else:
        postgresql_setup = smoke_zephyr.utilities.which('postgresql-setup')
        if postgresql_setup is None:
            logger.debug('postgresql-setup was not found')
        else:
            logger.debug('using postgresql-setup to ensure that the database is initialized')
            startup.run_process([postgresql_setup, '--initdb'])
        results = startup.run_process([systemctl_bin, 'status', 'postgresql.service'])
        # wait for the process to return and check if it's running (status 0)
        if results.status == os.EX_OK:
            logger.debug('postgresql service is already running via systemctl')
        else:
            logger.info('postgresql service is not running, starting it now via systemctl')
            results = startup.run_process([systemctl_bin, 'start', 'postgresql'])
            if results.status != os.EX_OK:
                logger.error('failed to start the postgresql service via systemctl')
                raise errors.KingPhisherDatabaseError('postgresql service failed to start via systemctl')
            logger.debug('postgresql service successfully started via systemctl')
    rows = _popen_psql('SELECT usename FROM pg_user')
    if connection_url.username not in rows:
        logger.info('the specified postgresql user does not exist, adding it now')
        if not is_sanitary(connection_url.username):
            raise errors.KingPhisherInputValidationError('will not create the postgresql user (username contains bad characters)')
        if not is_sanitary(connection_url.password):
            raise errors.KingPhisherInputValidationError('will not create the postgresql user (password contains bad characters)')
        rows = _popen_psql("CREATE USER {url.username} WITH PASSWORD '{url.password}'".format(url=connection_url))
        if rows != ['CREATE ROLE']:
            logger.error('failed to create the postgresql user')
            raise errors.KingPhisherDatabaseError('failed to create the postgresql user')
        logger.debug('the specified postgresql user was successfully created')
    rows = _popen_psql('SELECT datname FROM pg_database')
    if connection_url.database not in rows:
        logger.info('the specified postgresql database does not exist, adding it now')
        if not is_sanitary(connection_url.database):
            raise errors.KingPhisherInputValidationError('will not create the postgresql database (name contains bad characters)')
        rows = _popen_psql("CREATE DATABASE {url.database} OWNER {url.username}".format(url=connection_url))
        if rows != ['CREATE DATABASE']:
            logger.error('failed to create the postgresql database')
            raise errors.KingPhisherDatabaseError('failed to create the postgresql database')
        logger.debug('the specified postgresql database was successfully created')
|
Perform additional initialization checks and operations for a PostgreSQL
database. If the database is hosted locally this will ensure that the
service is currently running and start it if it is not. Additionally if the
specified database or user do not exist, they will be created.
:param connection_url: The url for the PostgreSQL database connection.
:type connection_url: :py:class:`sqlalchemy.engine.url.URL`
|
init_database_postgresql
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/manager.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/manager.py
|
BSD-3-Clause
|
def register_table(table):
    """
    Register a database table. This will populate the ``database_tables``
    dictionary with the information provided. This also forwards signals to the
    appropriate listeners within the :py:mod:`server.signal` module.

    :param cls table: The table to register.
    """
    metatable = table.metatable()
    database_tables[metatable.name] = metatable
    sqlalchemy.event.listen(table, 'before_delete', forward_signal_delete)
    sqlalchemy.event.listen(table, 'before_insert', forward_signal_insert)
    sqlalchemy.event.listen(table, 'before_update', forward_signal_update)
    return table
|
Register a database table. This will populate the ``database_tables``
dictionary with the information provided. This also forwards signals to the appropriate
listeners within the :py:mod:`server.signal` module.
:param cls table: The table to register.
|
register_table
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
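Typical use is as a class decorator on a model definition. The `Widget` model below is hypothetical and assumes the declarative `Base` supplies the `metatable()` classmethod, as the real models in this module do.
@register_table
class Widget(Base):  # hypothetical model for illustration
    __tablename__ = 'widgets'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)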
def assert_session_has_permissions(self, *args, **kwargs):
    """
    A convenience function which wraps :py:meth:`~.session_has_permissions`
    and raises a :py:exc:`~king_phisher.errors.KingPhisherPermissionError`
    if the session does not have the specified permissions.
    """
    if self.session_has_permissions(*args, **kwargs):
        return
    raise errors.KingPhisherPermissionError()
|
A convenience function which wraps :py:meth:`~.session_has_permissions`
and raises a :py:exc:`~king_phisher.errors.KingPhisherPermissionError`
if the session does not have the specified permissions.
|
assert_session_has_permissions
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_permissions(self, access, session):
"""
Check that the authenticated session has the permissions specified in
*access*. The permissions in *access* are abbreviated with the first
letter of create, read, update, and delete. For example, to check for
read and update permissions, *access* would be ``'ru'``.
.. note::
This will always return ``True`` for sessions which are for
administrative users. To maintain this logic, this method **should
not** be overridden in subclasses. Instead override the specific
``_session_has_*_access`` methods as necessary.
:param str access: The desired permissions.
:param session: The authenticated session to check access for.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if session.user_is_admin:
return True
cls = self.__class__
if cls.is_private:
return False
access = access.lower()
for case in utilities.switch(access, comp=operator.contains, swapped=True):
if case('c') and not cls.session_has_create_access(session, instance=self):
break
if case('r') and not cls.session_has_read_access(session, instance=self):
break
if case('u') and not cls.session_has_update_access(session, instance=self):
break
if case('d') and not cls.session_has_delete_access(session, instance=self):
break
else:
return True
return False
|
Check that the authenticated session has the permissions specified in
*access*. The permissions in *access* are abbreviated with the first
letter of create, read, update, and delete. For example, to check for
read and update permissions, *access* would be ``'ru'``.
.. note::
This will always return ``True`` for sessions which are for
administrative users. To maintain this logic, this method **should
not** be overridden in subclasses. Instead override the specific
``_session_has_*_access`` methods as necessary.
:param str access: The desired permissions.
:param session: The authenticated session to check access for.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_permissions
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
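To make the per-letter dispatch in session_has_permissions concrete, here is a minimal standalone sketch of the same 'crud' access-string check using plain membership tests instead of the project's utilities.switch helper; all names here are illustrative.
def has_permissions(access, checks):
	# checks maps each of 'c', 'r', 'u', 'd' to a zero-argument callable
	for letter in access.lower():
		check = checks.get(letter)
		if check is None or not check():
			return False
	return True
# e.g. has_permissions('ru', {'r': lambda: True, 'u': lambda: False}) -> False
|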
def session_has_create_access(cls, session, instance=None):
"""
Check that the authenticated *session* has access to create the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if session.user_is_admin:
return True
return cls._session_has_create_access(session, instance=instance)
|
Check that the authenticated *session* has access to create the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_create_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_delete_access(cls, session, instance=None):
"""
Check that the authenticated *session* has access to delete the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if session.user_is_admin:
return True
return cls._session_has_delete_access(session, instance=instance)
|
Check that the authenticated *session* has access to delete the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_delete_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_read_access(cls, session, instance=None):
"""
Check that the authenticated *session* has access to read the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if session.user_is_admin:
return True
return cls._session_has_read_access(session, instance=instance)
|
Check that the authenticated *session* has access to read the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_read_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_read_prop_access(cls, session, prop, instance=None):
"""
Check that the authenticated *session* has access to read the property
of the specified model *instance*. This allows models to only explicitly
control which of their attributes can be read by a particular *session*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if session.user_is_admin:
return True
return cls._session_has_read_prop_access(session, prop, instance=instance)
|
Check that the authenticated *session* has access to read the property
of the specified model *instance*. This allows models to only explicitly
control which of their attributes can be read by a particular *session*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_read_prop_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def session_has_update_access(cls, session, instance=None):
"""
Check that the authenticated *session* has access to update the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if session.user_is_admin:
return True
return cls._session_has_update_access(session, instance=instance)
|
Check that the authenticated *session* has access to update the
specified model *instance*.
:param session: The authenticated session to check access for.
:param instance: The optional model instance to inspect.
:return: Whether the session has the desired permissions.
:rtype: bool
|
session_has_update_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def metatable(cls):
"""
Generate a :py:class:`.MetaTable` instance for this model class.
:return: The appropriate metadata for the table represented by this model.
:rtype: :py:class:`.MetaTable`
"""
columns = tuple(col.name for col in cls.__table__.columns)
return MetaTable(column_names=columns, model=cls, name=cls.__tablename__, table=cls.__table__)
|
Generate a :py:class:`.MetaTable` instance for this model class.
:return: The appropriate metadata for the table represented by this model.
:rtype: :py:class:`.MetaTable`
|
metatable
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/models.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/models.py
|
BSD-3-Clause
|
def __init__(self, namespace=None, order_by='created'):
"""
.. versionchanged:: 1.14.0
Added the *order_by* parameter.
:param str namespace: The unique identifier of this namespace.
:param str order_by: The attribute to order stored items by. This must be one of "created", "id", "key", or "modified".
"""
self.namespace = namespace
self.order_by = order_by
|
.. versionchanged:: 1.14.0
Added the *order_by* parameter.
:param str namespace: The unique identifier of this namespace.
:param str order_by: The attribute to order stored items by. This must be one of "created", "id", "key", or "modified".
|
__init__
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/storage.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/storage.py
|
BSD-3-Clause
|
def validate_credential(credential, campaign):
"""
	Validate a *credential* object with regard to the configuration provided in
	*campaign*. This uses :py:func:`~.validate_credential_fields` to validate
	each field individually and then returns either ``None``, ``True`` or
	``False``. If no validation took place on any field, ``None`` is returned;
	otherwise, if any field was validated, a boolean is returned indicating
	whether or not all validated (non-``None``) fields passed validation.
:param credential: The credential object to validate.
:param campaign: The campaign with the validation configuration.
:return: Either a boolean or ``None`` depending on the results.
"""
fields = validate_credential_fields(credential, campaign)
fields = tuple(getattr(fields, field) for field in CredentialCollection._fields)
if all(field is None for field in fields):
return None
return all(field is None or field is True for field in fields)
|
Validate a *credential* object with regard to the configuration provided in
*campaign*. This uses :py:func:`~.validate_credential_fields` to validate
each field individually and then returns either ``None``, ``True`` or
``False``. If no validation took place on any field, ``None`` is returned;
otherwise, if any field was validated, a boolean is returned indicating
whether or not all validated (non-``None``) fields passed validation.
:param credential: The credential object to validate.
:param campaign: The campaign with the validation configuration.
:return: Either a boolean or ``None`` depending on the results.
|
validate_credential
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/validation.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/validation.py
|
BSD-3-Clause
|
def validate_credential_fields(credential, campaign):
"""
	Validate a *credential* object with regard to the configuration provided in
	*campaign*. Each field in the *credential* object is validated and a new
	:py:class:`~.CredentialCollection` is returned with its fields set to the
	results of the validation. A field's validation result is either ``None``,
	``True`` or ``False``. If no validation took place on the field, either
	because nothing was configured for it in *campaign*, or because the validation
	information was invalid (a malformed regex, for example), the result will be
	``None``. Otherwise, the result is either ``True`` or ``False`` for the
	field depending on the validation.
:param credential: The credential object to validate.
:param campaign: The campaign with the validation configuration.
:return: A :py:class:`~.CredentialCollection` object with the fields set to the results of their respective validation.
:rtype: :py:class:`~.CredentialCollection`
"""
# note that this uses duck-typing so the *credential* object could be either a db_models.Credential instance or a
# db_validation.CredentialCollection instance
validated = True
results = {}
for field in CredentialCollection._fields:
results[field] = None # default to None (no validation occurred on this field)
regex = getattr(campaign, 'credential_regex_' + field, None)
if regex is None:
continue
try:
regex = re.compile(regex)
except re.error:
logger.warning("regex compile error while validating credential field: {0}".format(field), exc_info=True)
continue
value = getattr(credential, field)
if value is None:
validated = False
else:
validated = validated and regex.match(value) is not None
if not validated:
logger.debug("credential failed regex validation on field: {0}".format(field))
results[field] = validated
return CredentialCollection(**results)
|
Validate a *credential* object with regard to the configuration provided in
*campaign*. Each field in the *credential* object is validated and a new
:py:class:`~.CredentialCollection` is returned with its fields set to the
results of the validation. A field's validation result is either ``None``,
``True`` or ``False``. If no validation took place on the field, either
because nothing was configured for it in *campaign*, or because the validation
information was invalid (a malformed regex, for example), the result will be
``None``. Otherwise, the result is either ``True`` or ``False`` for the
field depending on the validation.
:param credential: The credential object to validate.
:param campaign: The campaign with the validation configuration.
:return: A :py:class:`~.CredentialCollection` object with the fields set to the results of their respective validation.
:rtype: :py:class:`~.CredentialCollection`
|
validate_credential_fields
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/database/validation.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/database/validation.py
|
BSD-3-Clause
|
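A condensed sketch of the per-field logic in validate_credential_fields, reproducing the None/True/False semantics for a single field without the campaign object; the helper name is an assumption, not repository code.
import re
def validate_field(value, pattern):
	if pattern is None:
		return None  # nothing was configured for this field
	try:
		regex = re.compile(pattern)
	except re.error:
		return None  # malformed regex, treated as if no validation occurred
	if value is None:
		return False
	return regex.match(value) is not None
# validate_field('alice', r'\w+') -> True; validate_field(None, r'\w+') -> False
|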
def info_has_read_prop_access(cls, info, model, field_name=None, instance=None):
"""
Check that the context provided by *info* has access to read the
specified property of the model. This can be used to ensure that
	sessions which cannot read a protected field also cannot obtain
indirect access such as filtering or sorting by it.
:param info: The resolve information for this execution.
:type info: :py:class:`graphql.execution.base.ResolveInfo`
:param model: The SQLAlchemy model to check read-property access on.
:type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
:param str field_name: The specific field name to check, otherwise ``info.field_name``.
:param instance: An optional instance of *model* to use for the access check.
:return: Whether or not the context is authorized to access the property.
:rtype: bool
"""
rpc_session = info.context.get('rpc_session')
if rpc_session is None:
return True
field_name = field_name or info.field_name
return model.session_has_read_prop_access(rpc_session, field_name, instance=instance)
|
Check that the context provided by *info* has access to read the
specified property of the model. This can be used to ensure that
sessions which cannot read a protected field also cannot obtain
indirect access such as filtering or sorting by it.
:param info: The resolve information for this execution.
:type info: :py:class:`graphql.execution.base.ResolveInfo`
:param model: The SQLAlchemy model to check read-property access on.
:type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
:param str field_name: The specific field name to check, otherwise ``info.field_name``.
:param instance: An optional instance of *model* to use for the access check.
:return: Whether or not the context is authorized to access the property.
:rtype: bool
|
info_has_read_prop_access
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/graphql/middleware.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/graphql/middleware.py
|
BSD-3-Clause
|
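A sketch of how info_has_read_prop_access might be wired into graphene-style middleware so that protected fields resolve to None for unauthorized sessions; the middleware class is an assumption, not code from the repository.
class ProtectedFieldMiddleware(object):
	# hypothetical middleware; graphene invokes resolve(next, root, info, **args)
	def resolve(self, next_resolver, root, info, **kwargs):
		model = type(root)
		if hasattr(model, 'session_has_read_prop_access'):
			if not info_has_read_prop_access(info, model, instance=root):
				return None
		return next_resolver(root, info, **kwargs)
|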
def sa_get_relationship(session, model, name):
"""
Resolve the relationship on a SQLAlchemy model to either an object (in the
	case of one-to-one relationships) or a query for all of the objects (in the
case of one-to-many relationships).
:param session: The SQLAlchemy session to associate the query with.
:param model: The SQLAlchemy model of the object associated with the relationship.
:param name: The name of the relationship as it exists in the *model*.
:return: Either the object or a SQLAlchemy query for the objects.
"""
mapper = sqlalchemy.inspect(model.__class__)
relationship = mapper.relationships[name]
foreign_model = db_models.database_tables[relationship.target.name].model
query = session.query(foreign_model)
if relationship.uselist:
column_name = relationship.primaryjoin.right.name
return query.filter(getattr(foreign_model, column_name) == model.id)
column_name = relationship.primaryjoin.left.name
query = query.filter(getattr(foreign_model, column_name) == getattr(model, relationship.primaryjoin.right.name))
return query.first()
|
Resolve the relationship on a SQLAlchemy model to either an object (in the
case of one-to-one relationships) or a query for all of the objects (in the
case of one-to-many relationships).
:param session: The SQLAlchemy session to associate the query with.
:param model: The SQLAlchemy model of the object associated with the relationship.
:param name: The name of the relationship as it exists in the *model*.
:return: Either the object or a SQLAlchemy query for the objects.
|
sa_get_relationship
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/graphql/types/database.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/graphql/types/database.py
|
BSD-3-Clause
|
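Hypothetical usage of sa_get_relationship showing its two return shapes; the campaign object and relationship names are illustrative.
# one-to-many: returns a SQLAlchemy Query over the related rows
messages = sa_get_relationship(session, campaign, 'messages')
# one-to-one: returns the single related object, or None
user = sa_get_relationship(session, campaign, 'user')
|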
def sa_object_resolver(attname, default_value, model, info, **kwargs):
"""
Resolve the attribute for the given SQLAlchemy model object. If the
attribute is a relationship, use :py:func:`.sa_get_relationship` to resolve
it.
:param str attname: The name of the attribute to resolve on the object.
:param default_value: The default value to return if the attribute is unavailable.
:param model: The SQLAlchemy model to resolve the attribute for.
:type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
:param info: The resolve information for this execution.
:type info: :py:class:`graphql.execution.base.ResolveInfo`
"""
mapper = sqlalchemy.inspect(model.__class__)
if attname in mapper.relationships:
return sa_get_relationship(info.context['session'], model, attname)
return getattr(model, attname, default_value)
|
Resolve the attribute for the given SQLAlchemy model object. If the
attribute is a relationship, use :py:func:`.sa_get_relationship` to resolve
it.
:param str attname: The name of the attribute to resolve on the object.
:param default_value: The default value to return if the attribute is unavailable.
:param model: The SQLAlchemy model to resolve the attribute for.
:type model: :py:class:`sqlalchemy.ext.declarative.api.Base`
:param info: The resolve information for this execution.
:type info: :py:class:`graphql.execution.base.ResolveInfo`
|
sa_object_resolver
|
python
|
rsmusllp/king-phisher
|
king_phisher/server/graphql/types/database.py
|
https://github.com/rsmusllp/king-phisher/blob/master/king_phisher/server/graphql/types/database.py
|
BSD-3-Clause
|
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
audio_embedding: Optional[torch.Tensor] = None,
class_labels: Optional[torch.Tensor] = None,
mask_cond_fea: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
full_mask: Optional[torch.Tensor] = None,
face_mask: Optional[torch.Tensor] = None,
lip_mask: Optional[torch.Tensor] = None,
motion_scale: Optional[torch.Tensor] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
return_dict: bool = True,
# start: bool = False,
) -> Union[UNet3DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
        # By default, samples have to be at least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (number of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info(
"Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# time
timesteps = timestep
if not torch.is_tensor(timesteps):
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor(
[timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError(
"class_labels should be provided when num_class_embeds > 0"
)
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
emb = emb + class_emb
# pre-process
sample = self.conv_in(sample)
if mask_cond_fea is not None:
sample = sample + mask_cond_fea
# down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if (
hasattr(downsample_block, "has_cross_attention")
and downsample_block.has_cross_attention
):
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
# print("")
else:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
# audio_embedding=audio_embedding,
)
# print("")
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = (
down_block_res_sample + down_block_additional_residual
)
new_down_block_res_samples += (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# mid
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
if mid_block_additional_residual is not None:
sample = sample + mid_block_additional_residual
# up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets):]
down_block_res_samples = down_block_res_samples[
: -len(upsample_block.resnets)
]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if (
hasattr(upsample_block, "has_cross_attention")
and upsample_block.has_cross_attention
):
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
upsample_size=upsample_size,
attention_mask=attention_mask,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask,
audio_embedding=audio_embedding,
motion_scale=motion_scale,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
encoder_hidden_states=encoder_hidden_states,
# audio_embedding=audio_embedding,
)
# post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet3DConditionOutput(sample=sample)
|
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
|
forward
|
python
|
fudan-generative-vision/hallo
|
hallo/models/unet_3d.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/hallo/models/unet_3d.py
|
MIT
|
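A small standalone check of the spatial-size constraint discussed in the forward comments above, with an assumed three-level upsampler (overall factor 2**3 == 8).
import torch
num_upsamplers = 3                  # assumed depth, for illustration
factor = 2 ** num_upsamplers        # overall upsampling factor: 8
sample = torch.randn(1, 4, 64, 48)  # 64 and 48 are both multiples of 8
forced = any(s % factor != 0 for s in sample.shape[-2:])
print(forced)                       # False: no forced upsample size needed
|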
def forward(self,):
"""
        Empty function to override the abstract forward method of nn.Module.
"""
|
Empty function to override the abstract forward method of nn.Module.
|
forward
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
def get_modules(self):
"""
Simple method to avoid too-few-public-methods pylint error
"""
return {
"reference_unet": self.reference_unet,
"denoising_unet": self.denoising_unet,
"face_locator": self.face_locator,
"imageproj": self.imageproj,
"audioproj": self.audioproj,
}
|
Simple method to avoid too-few-public-methods pylint error
|
get_modules
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
def inference_process(args: argparse.Namespace):
"""
Perform inference processing.
Args:
args (argparse.Namespace): Command-line arguments.
This function initializes the configuration for the inference process. It sets up the necessary
modules and variables to prepare for the upcoming inference steps.
"""
# 1. init config
cli_args = filter_non_none(vars(args))
config = OmegaConf.load(args.config)
config = OmegaConf.merge(config, cli_args)
source_image_path = config.source_image
driving_audio_path = config.driving_audio
save_path = config.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
motion_scale = [config.pose_weight, config.face_weight, config.lip_weight]
# 2. runtime variables
device = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
if config.weight_dtype == "fp16":
weight_dtype = torch.float16
elif config.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif config.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
weight_dtype = torch.float32
# 3. prepare inference data
# 3.1 prepare source image, face mask, face embeddings
img_size = (config.data.source_image.width,
config.data.source_image.height)
clip_length = config.data.n_sample_frames
face_analysis_model_path = config.face_analysis.model_path
with ImageProcessor(img_size, face_analysis_model_path) as image_processor:
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
source_image_path, save_path, config.face_expand_ratio)
# 3.2 prepare audio embeddings
sample_rate = config.data.driving_audio.sample_rate
assert sample_rate == 16000, "audio sample rate must be 16000"
fps = config.data.export_video.fps
wav2vec_model_path = config.wav2vec.model_path
wav2vec_only_last_features = config.wav2vec.features == "last"
audio_separator_model_file = config.audio_separator.model_path
with AudioProcessor(
sample_rate,
fps,
wav2vec_model_path,
wav2vec_only_last_features,
os.path.dirname(audio_separator_model_file),
os.path.basename(audio_separator_model_file),
os.path.join(save_path, "audio_preprocess")
) as audio_processor:
audio_emb, audio_length = audio_processor.preprocess(driving_audio_path, clip_length)
# 4. build modules
sched_kwargs = OmegaConf.to_container(config.noise_scheduler_kwargs)
if config.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
vae = AutoencoderKL.from_pretrained(config.vae.model_path)
reference_unet = UNet2DConditionModel.from_pretrained(
config.base_model_path, subfolder="unet")
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
config.base_model_path,
config.motion_module_path,
subfolder="unet",
unet_additional_kwargs=OmegaConf.to_container(
config.unet_additional_kwargs),
use_landmark=False,
)
face_locator = FaceLocator(conditioning_embedding_channels=320)
image_proj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
)
audio_proj = AudioProjModel(
seq_len=5,
blocks=12, # use 12 layers' hidden states of wav2vec
channels=768, # audio embedding channel
intermediate_dim=512,
output_dim=768,
context_tokens=32,
).to(device=device, dtype=weight_dtype)
audio_ckpt_dir = config.audio_ckpt_dir
# Freeze
vae.requires_grad_(False)
image_proj.requires_grad_(False)
reference_unet.requires_grad_(False)
denoising_unet.requires_grad_(False)
face_locator.requires_grad_(False)
audio_proj.requires_grad_(False)
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
net = Net(
reference_unet,
denoising_unet,
face_locator,
image_proj,
audio_proj,
)
    m, u = net.load_state_dict(
        torch.load(
            os.path.join(audio_ckpt_dir, "net.pth"),
            map_location="cpu",
        ),
    )
    assert len(m) == 0 and len(u) == 0, "failed to load the correct checkpoint"
    print("loaded weights from", os.path.join(audio_ckpt_dir, "net.pth"))
# 5. inference
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=net.reference_unet,
denoising_unet=net.denoising_unet,
face_locator=net.face_locator,
scheduler=val_noise_scheduler,
image_proj=net.imageproj,
)
pipeline.to(device=device, dtype=weight_dtype)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
tensor_result = []
generator = torch.manual_seed(42)
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
config.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
            motion_frames = motion_frames[-config.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=net.audioproj.device, dtype=net.audioproj.dtype)
audio_tensor = net.audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=img_size[0],
height=img_size[1],
video_length=clip_length,
num_inference_steps=config.inference_steps,
guidance_scale=config.cfg_scale,
generator=generator,
motion_scale=motion_scale,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
output_file = config.output
# save the result after all iteration
tensor_to_video(tensor_result, output_file, driving_audio_path)
return output_file
|
Perform inference processing.
Args:
args (argparse.Namespace): Command-line arguments.
This function initializes the configuration for the inference process. It sets up the necessary
modules and variables to prepare for the upcoming inference steps.
|
inference_process
|
python
|
fudan-generative-vision/hallo
|
scripts/inference.py
|
https://github.com/fudan-generative-vision/hallo/blob/master/scripts/inference.py
|
MIT
|
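The inference loop above windows the audio embedding into fixed-length clips; a sketch of that arithmetic with illustrative numbers (not taken from any real config).
clip_length = 16
audio_frames = 70
times = audio_frames // clip_length  # 4 full clips; the remainder is dropped
for t in range(times):
    start = t * clip_length
    end = min((t + 1) * clip_length, audio_frames)
    # the real loop slices audio_emb[start:end] here
|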
def make_tfrecord_loaders(args):
"""Load train/val/test dataset from shuffled TFRecords"""
import data_utils.tf_dl
data_set_args = {'batch_size': args.batch_size,
'max_seq_len': args.seq_length,
'max_preds_per_seq': args.max_preds_per_seq,
'train': True,
'num_workers': max(args.num_workers, 1),
'seed': args.seed + args.rank + 1,
'threaded_dl': args.num_workers > 0
}
train = data_utils.tf_dl.TFRecordDataLoader(args.train_data,
**data_set_args)
data_set_args['train'] = False
if args.eval_seq_length is not None:
data_set_args['max_seq_len'] = args.eval_seq_length
if args.eval_max_preds_per_seq is not None:
data_set_args['max_preds_per_seq'] = args.eval_max_preds_per_seq
valid = None
if args.valid_data is not None:
valid = data_utils.tf_dl.TFRecordDataLoader(args.valid_data,
**data_set_args)
test = None
if args.test_data is not None:
test = data_utils.tf_dl.TFRecordDataLoader(args.test_data,
**data_set_args)
tokenizer = data_utils.make_tokenizer(args.tokenizer_type,
train,
args.tokenizer_path,
args.vocab_size,
args.tokenizer_model_type,
cache_dir=args.cache_dir)
return (train, valid, test), tokenizer
|
Load train/val/test dataset from shuffled TFRecords
|
make_tfrecord_loaders
|
python
|
THUDM/GLM
|
configure_data.py
|
https://github.com/THUDM/GLM/blob/master/configure_data.py
|
MIT
|
def get_split(args):
"""
Get dataset splits from comma separated string list
"""
splits = []
if args.split.find(',') != -1:
splits = [float(s) for s in args.split.split(',')]
elif args.split.find('/') != -1:
splits = [float(s) for s in args.split.split('/')]
else:
splits = [float(args.split)]
split_total = sum(splits)
if split_total < 1.:
splits.append(1 - split_total)
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
if args.valid_data is not None:
splits[1] = 0.
if args.test_data is not None:
splits[2] = 0.
final_sum = sum(splits)
return [s / final_sum for s in splits]
|
Get dataset splits from comma separated string list
|
get_split
|
python
|
THUDM/GLM
|
configure_data.py
|
https://github.com/THUDM/GLM/blob/master/configure_data.py
|
MIT
|
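Example invocations of get_split using a stand-in argparse namespace; the values are hypothetical.
import types
args = types.SimpleNamespace(split='9,1', valid_data=None, test_data=None)
print(get_split(args))  # [0.9, 0.1, 0.0]
args = types.SimpleNamespace(split='0.8', valid_data=None, test_data=None)
print(get_split(args))  # [0.8, 0.2, 0.0] after padding the remainder
|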
def configure_data():
"""add cmdline flags for configuring datasets"""
# These are options that are used by data_utils, but are either
# deprecated or not meant to be exposed to the command line user.
    # These options are intended to be set in code by specific scripts.
defaults = {
'world_size': 1,
'rank': -1,
'persist_state': 0,
'lazy': False,
'transpose': False,
'data_set_type': 'supervised',
'seq_length': 256,
'eval_seq_length': 256,
'samples_per_shard': 100
}
return DataConfig(defaults=defaults)
|
add cmdline flags for configuring datasets
|
configure_data
|
python
|
THUDM/GLM
|
configure_data.py
|
https://github.com/THUDM/GLM/blob/master/configure_data.py
|
MIT
|
def process_batch(batch, args):
"""Process batch and produce inputs for the model."""
keys = ["text", "label"]
if args.pretrained_bert:
keys += ["padding_mask", "types"]
else:
keys += ["mask", "position"]
if args.cloze_eval:
if args.fast_decode:
keys += ["dec_text", "dec_position", "dec_mask", "dec_target", "dec_logit_mask"]
else:
keys += ["target", "logit_mask"]
if args.segment_length > 0:
keys += ["segment_id"]
if args.continuous_prompt:
keys += ["prompt_pos"]
if args.variable_num_choices:
keys.append("loss_mask")
# Broadcast data.
datatype = torch.int64
data_b = mpu.broadcast_data(keys, batch, datatype)
if "padding_mask" in data_b:
attention_mask = data_b['padding_mask'].float().cuda().contiguous()
if args.fp16:
attention_mask = attention_mask.half()
data_b["padding_mask"] = attention_mask
return data_b
|
Process batch and produce inputs for the model.
|
process_batch
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
def finetune_forward_step(batch, model, args, timers, mems):
"""Simple forward step with cross-entropy loss."""
# Get the batch.
timers('batch generator').start()
try:
batch_ = next(batch)
except BaseException:
batch_ = batch
data = process_batch(batch_, args)
timers('batch generator').stop()
# Forward model.
if args.pretrained_bert:
tokens, types, labels, attention_mask = data['text'], data['types'], data['label'], data['padding_mask']
logits = model(tokens, token_type_ids=types, attention_mask=attention_mask, checkpoint_activations=True)
elif args.cloze_eval:
tokens, labels, position_ids = data['text'], data['label'], data['position']
attention_mask = data['mask']
if not args.fast_decode:
target_ids, logit_mask = data['target'], data['logit_mask']
if args.continuous_prompt:
prompt_pos = data["prompt_pos"]
result = model(tokens, position_ids, attention_mask, target_ids, logit_mask, prompt_pos=prompt_pos)
else:
result = model(tokens, position_ids, attention_mask, target_ids, logit_mask)
if not args.multi_token:
logits, lm_logits, *mems = result
else:
logits, *mems = result
else:
dec_input_ids, dec_position_ids, dec_attention_mask = data['dec_text'], data['dec_position'], data[
'dec_mask']
dec_target_ids, dec_logit_mask = data['dec_target'], data['dec_logit_mask']
logits, *mems = model(tokens, position_ids, attention_mask, dec_input_ids, dec_position_ids,
dec_attention_mask, dec_target_ids, dec_logit_mask)
else:
tokens, labels, position_ids, attention_mask = data['text'], data['label'], data['position'], data['mask']
logits, *mems = model(tokens, position_ids, attention_mask)
if args.adapet:
batch_size, num_classes = logits.size()[:2]
label_mask = torch.ones(batch_size, num_classes, device=logits.device)
label_mask.scatter_(1, labels.unsqueeze(1), -1.0)
if "loss_mask" in data:
loss_mask = data["loss_mask"]
label_mask = label_mask * loss_mask
loss = logits.contiguous().float() * label_mask
loss = loss.sum() / batch_size
else:
if "segment_id" in data:
from torch_scatter import scatter_sum
if "loss_mask" in data:
logits = logits * data["loss_mask"]
logits = scatter_sum(logits, data["segment_id"], dim=1)
elif "loss_mask" in data:
loss_mask = data["loss_mask"]
logits = logits * loss_mask - 10000.0 * (1.0 - loss_mask)
if args.loss_func == "cross_entropy":
# Cross-entropy loss.
loss_func = torch.nn.CrossEntropyLoss()
loss = loss_func(logits.contiguous().float(), labels)
elif args.loss_func == "hinge":
correct_logits = logits[range(logits.size(0)), labels]
hinge_loss = 1 + logits - correct_logits.unsqueeze(1)
hinge_loss[hinge_loss < 0.0] = 0.0
loss = hinge_loss.sum(dim=1).mean() - 1.0
elif args.loss_func == "generative" or args.loss_func == "mix":
batch_size = logits.size(0)
loss = - logits[range(batch_size), labels].mean()
if args.loss_func == "mix":
loss_func = torch.nn.CrossEntropyLoss()
loss = loss + loss_func(logits.contiguous().float(), labels)
else:
raise NotImplementedError
# Reduce loss for logging.
return loss, mems, 'bert'
|
Simple forward step with cross-entropy loss.
|
finetune_forward_step
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
def _build_infinite_size_dataloader(dataloader):
"""Build a looped dataloader with infinite size."""
iterator = dataloader.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = dataloader.__iter__()
|
Build a looped dataloader with infinite size.
|
_build_infinite_size_dataloader
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
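Because the generator only needs an iterable, its wrap-around behaviour can be demonstrated with a plain list (illustrative).
looped = _build_infinite_size_dataloader([1, 2, 3])
print([next(looped) for _ in range(7)])  # [1, 2, 3, 1, 2, 3, 1]
|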
def finetune(args, train_valid_datasets_provider, model_kwargs, forward_step=finetune_forward_step,
end_of_epoch_callback_provider=None):
"""Main finetune function used across all tasks."""
global tokenizer
timers = Timers()
tokenizer = prepare_tokenizer(args)
pretrain_glm.tokenizer = tokenizer
if args.save:
args.save = os.path.join(args.save, args.experiment_name)
# Train and validation data loaders.
    timers('train/valid/test dataset/dataloader').start()
train_dataloader, valid_dataloader = None, None
train_block_dataloader, valid_block_dataloader = None, None
if train_valid_datasets_provider is not None and args.epochs > 0:
if mpu.get_model_parallel_rank() == 0:
train_dataset, valid_dataset = train_valid_datasets_provider(args, tokenizer)
train_dataloader, valid_dataloader = _build_train_valid_dataloaders(train_dataset, valid_dataset, args)
if args.no_validation:
valid_dataloader = None
train_iters = torch.cuda.LongTensor([len(train_dataloader)])
else:
train_iters = torch.cuda.LongTensor([0])
torch.distributed.broadcast(train_iters, mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
if mpu.get_model_parallel_rank() != 0:
args.train_iters_per_epoch = train_iters[0].item()
args.train_iters = args.epochs * args.train_iters_per_epoch
train_dataloader = FakeDataloader(args.train_iters_per_epoch)
if args.no_validation:
valid_dataloader = None
else:
valid_dataloader = FakeDataloader(None)
if args.block_lm_ratio > 0.0:
if mpu.get_model_parallel_rank() == 0:
train_block_dataset, valid_block_dataset = train_valid_datasets_provider(args, tokenizer,
pattern_text=True)
train_block_dataloader = make_data_loader(train_block_dataset, tokenizer,
args.batch_size * mpu.get_data_parallel_world_size(),
args.train_iters, args, shuffle=True,
block_collate=True)
valid_block_dataloader = make_data_loader(valid_block_dataset, tokenizer,
args.batch_size * mpu.get_data_parallel_world_size(), (
args.train_iters // args.eval_interval + 1) * args.eval_iters,
args, shuffle=True, block_collate=True)
else:
train_block_dataloader = FakeDataloader(args.train_iters)
valid_block_dataloader = FakeDataloader(None)
train_block_dataloader, valid_block_dataloader = iter(train_block_dataloader), iter(valid_block_dataloader)
    timers('train/valid/test dataset/dataloader').stop()
    # Build callback function.
timers('callback function').start()
end_of_epoch_callback, end_of_train_callback = None, None
if end_of_epoch_callback_provider is not None:
if train_valid_datasets_provider is not None and args.epochs > 0 and not args.no_validation:
end_of_epoch_callback = end_of_epoch_callback_provider(args, tokenizer, is_test=False)
end_of_train_callback = end_of_epoch_callback_provider(args, tokenizer, is_test=True)
timers('callback function').stop()
# Build model, optimizer and learning rate scheduler.
timers('model and optimizer').start()
model, optimizer, lr_scheduler = setup_model_and_optimizer(args, **model_kwargs)
timers('model and optimizer').stop()
# If pretrained checkpoint is provided and we have not trained for
# any iteration (i.e., iteration is zero), then load the pretrained
# checkpoint.
timers('pretrained checkpoint').start()
if args.load_pretrained is not None and not args.pretrained_bert:
task_tokens = None
if args.continuous_prompt and args.prompt_init:
if mpu.get_model_parallel_rank() == 0:
dataset = train_dataloader.dataset
processor, pvp = dataset.processor, dataset.pvp
task_tokens = []
for label in processor.get_labels():
verbalizer = pvp.verbalize(label)[0]
verbalizer_ids = tokenizer.EncodeAsIds(verbalizer).tokenization
task_tokens += verbalizer_ids
print_rank_0("Task tokens: " + tokenizer.DecodeIds(task_tokens))
num_task_tokens = len(task_tokens)
else:
num_task_tokens, task_tokens = 0, []
num_task_tokens = torch.cuda.LongTensor([num_task_tokens])
torch.distributed.broadcast(num_task_tokens, mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
num_task_tokens = num_task_tokens.item()
if num_task_tokens > 0:
if mpu.get_model_parallel_rank() == 0:
task_tokens = torch.cuda.LongTensor(task_tokens)
else:
task_tokens = torch.empty(num_task_tokens, device=torch.cuda.current_device(), dtype=torch.long)
torch.distributed.broadcast(task_tokens, mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
task_tokens = task_tokens.tolist()
with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1):
load_pretrained(model, args.load_pretrained, args, task_tokens=task_tokens)
# This is critical when only model is loaded. We should make sure
# master parameters are also updated.
if args.fp16 and optimizer is not None:
if args.deepspeed:
optimizer.refresh_fp32_params()
else:
optimizer._model_params_to_master_params()
if args.load is not None:
with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1):
load_checkpoint(model, optimizer, lr_scheduler, args, no_deepspeed=args.no_deepspeed_load)
# This is critical when only model is loaded. We should make sure
# master parameters are also updated.
if args.fp16 and optimizer is not None:
if args.deepspeed:
optimizer.refresh_fp32_params()
else:
optimizer._model_params_to_master_params()
torch.distributed.barrier()
timers('pretrained checkpoint').stop()
args.iteration = 0
summary_writer = None
if torch.distributed.get_rank() == 0:
args.log_dir = get_log_dir(base=args.summary_dir, name=args.experiment_name)
if os.path.exists(os.path.join(args.log_dir, "test_results.json")) and args.load is None and not args.overwrite:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.log_dir))
summary_writer = get_sample_writer(log_dir=args.log_dir, iteration=args.iteration)
print_and_save_args(args, verbose=True, log_dir=args.log_dir)
# Print setup timing.
print_rank_0('done with setups ...')
    timers.log(['train/valid/test dataset/dataloader', 'callback function',
'model and optimizer', 'pretrained checkpoint'])
print_rank_0('training ...')
# Finetune the model.
score_dict = None
if train_dataloader is not None and args.epochs > 0:
if args.block_lm_ratio > 0.0:
forward_step = mix_forward_step
best_iteration = _train(model, optimizer, lr_scheduler, forward_step,
(train_dataloader, train_block_dataloader), (valid_dataloader, valid_block_dataloader),
end_of_epoch_callback, args, timers,
summary_writer=summary_writer)
if end_of_train_callback is not None and best_iteration is not None:
with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1):
args.load = os.path.join(args.save, "best")
load_checkpoint(model, optimizer, lr_scheduler, args, no_load_optim=True, no_deepspeed=True)
args.load = None
torch.distributed.barrier()
if end_of_train_callback is not None:
score_dict = end_of_train_callback(model, epoch=-1, output_predictions=True)
# Or just evaluate.
else:
if end_of_train_callback is not None:
print_rank_0('evaluation only mode, setting epoch to -1')
score_dict = end_of_train_callback(model, epoch=-1, output_predictions=True)
if score_dict is not None and torch.distributed.get_rank() == 0:
score_dict.update({"type": "test"})
with open(os.path.join(args.log_dir, "test_results.json"), "w") as output:
output.write(json.dumps(score_dict) + "\n")
print_rank_0('done :-)')
|
Main finetune function used across all tasks.
|
finetune
|
python
|
THUDM/GLM
|
finetune_glm.py
|
https://github.com/THUDM/GLM/blob/master/finetune_glm.py
|
MIT
|
def add(self, hyp: torch.LongTensor, sum_logprobs: float, mems=None):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / (max(hyp.shape[-1], 1) ** self.length_penalty)
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp, mems))
if len(self) > self.num_beams:
sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
del self.beams[sorted_next_scores[0][1]]
self.worst_score = sorted_next_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
|
Add a new hypothesis to the list.
|
add
|
python
|
THUDM/GLM
|
generation_utils.py
|
https://github.com/THUDM/GLM/blob/master/generation_utils.py
|
MIT
|
def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
"""
        If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
|
If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
one in the heap, then we are done with this sentence.
|
is_done
|
python
|
THUDM/GLM
|
generation_utils.py
|
https://github.com/THUDM/GLM/blob/master/generation_utils.py
|
MIT
|
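Both add and is_done rank hypotheses by the same length-penalized score; a worked instance with assumed numbers.
# score = sum_logprobs / max(length, 1) ** length_penalty
sum_logprobs, length, length_penalty = -6.0, 4, 0.7
score = sum_logprobs / max(length, 1) ** length_penalty
print(score)  # about -2.274; a larger length_penalty boosts longer hypotheses
|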
def evaluate_and_print_results(prefix, data_iterator, model,
args, timers, forward_step_func, verbose=False, step=None, summary_writer=None):
"""Helper function to evaluate and dump results on screen."""
lm_loss, gpt_loss, bert_loss, sent_loss, multi_loss = evaluate(data_iterator, model, args, timers, verbose=verbose,
forward_step_func=forward_step_func)
lm_ppl = math.exp(min(20, lm_loss))
report_evaluate_metrics(summary_writer, prefix, lm_loss, lm_ppl, gpt_loss, bert_loss, sent_loss, multi_loss, step)
return lm_loss
|
Helper function to evaluate and dump results on screen.
|
evaluate_and_print_results
|
python
|
THUDM/GLM
|
pretrain_glm.py
|
https://github.com/THUDM/GLM/blob/master/pretrain_glm.py
|
MIT
|
def get_train_val_test_data(args, tokenizer):
"""Load the data on rank zero and boradcast number of tokens to all GPUS."""
(train_data, val_data, test_data) = (None, None, None)
# Data loader only on rank 0 of each model parallel group.
if mpu.get_model_parallel_rank() == 0:
data_config = configure_data()
if args.block_lm:
data_set_type = "Block"
elif args.transformer_xl:
data_set_type = "GPT-XL"
else:
data_set_type = "GPT2"
data_config.set_defaults(data_set_type=data_set_type, transpose=False)
train_data, val_data, test_data = data_config.apply(args, tokenizer)
data_counts = torch.cuda.LongTensor([int(args.do_train), int(args.do_valid), int(args.do_test)])
else:
data_counts = torch.cuda.LongTensor([0, 0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(data_counts,
mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
args.do_train = data_counts[0].item()
args.do_valid = data_counts[1].item()
args.do_test = data_counts[2].item()
return train_data, val_data, test_data
|
Load the data on rank zero and broadcast the number of tokens to all GPUs.
|
get_train_val_test_data
|
python
|
THUDM/GLM
|
pretrain_glm.py
|
https://github.com/THUDM/GLM/blob/master/pretrain_glm.py
|
MIT
|
def print_params_min_max_norm(optimizer, iteration):
"""Print min, max, and norm of all parameters."""
index = 0
rank = torch.distributed.get_rank()
    string = 'iteration, rank, index, model-parallel, min, max, norm\n'
optimizer_ = optimizer
if isinstance(optimizer, FP16_Optimizer):
optimizer_ = optimizer.optimizer
for param_group in optimizer_.param_groups:
for param in param_group['params']:
index += 1
min_ = param.data.min()
max_ = param.data.max()
norm = param.data.norm()
string += '{:7d}, {:4d}, {:4d}, {:2d}, '.format(
iteration, rank, index, int(param.model_parallel))
string += '{:.6E}, {:.6E}, {:.6E}\n'.format(min_, max_, norm)
print(string, flush=True)
|
Print min, max, and norm of all parameters.
|
print_params_min_max_norm
|
python
|
THUDM/GLM
|
utils.py
|
https://github.com/THUDM/GLM/blob/master/utils.py
|
MIT
|
def load_weights(src, dst, dst2src=False):
"""
    Loads weights from src to dst via in-place copy.
    src is a huggingface gpt2model, while dst is one of our models.
    dst2src=True loads parameters from our models into huggingface's.
    Note: dst2src=True is still untested.
"""
conv_layer = 'Conv1D' in str(type(src))
for n, p in src.named_parameters():
if dst2src:
data = dst._parameters[n].data
load = p.data
else:
data = p.data
load = dst._parameters[n].data
if conv_layer and 'weight' in n:
data = data.t().contiguous()
load.copy_(data)
|
Loads weights from src to dst via in-place copy.
src is a huggingface gpt2model, while dst is one of our models.
dst2src=True loads parameters from our models into huggingface's.
Note: dst2src=True is still untested.
|
load_weights
|
python
|
THUDM/GLM
|
utils.py
|
https://github.com/THUDM/GLM/blob/master/utils.py
|
MIT
|
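The Conv1D transpose in load_weights exists because huggingface's Conv1D stores its weight as (in_features, out_features) while torch.nn.Linear uses (out_features, in_features); an illustrative round-trip with assumed sizes.
import torch
w_conv1d = torch.randn(768, 2304)     # huggingface Conv1D layout: (in, out)
w_linear = w_conv1d.t().contiguous()  # nn.Linear layout: (out, in)
assert w_linear.shape == (2304, 768)
|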
def move_weights(our, oai, dst2src=False):
"""
    Loads weights from `oai` to `our` via in-place copy.
    `oai` is a huggingface gpt2model, while `our` is one of our models.
    dst2src=True loads parameters from our models into huggingface's.
    Note: dst2src=True is still untested.
"""
# while isinstance(our, (torchDDP, model.distributed.DistributedDataParallel, FP16_Module)):
# our=our.module
transformer_model = oai.transformer
load_weights(transformer_model.ln_f, our.transformer.final_layernorm, dst2src)
load_weights(transformer_model.wte, our.word_embeddings, dst2src)
load_weights(transformer_model.wpe, our.position_embeddings, dst2src)
for our_layer, oai_layer in zip(our.transformer.layers, oai.transformer.h):
load_transformer_layer(our_layer, oai_layer, dst2src)
|
Loads weights from `oai` to `our` via in-place copy.
`oai` is a huggingface gpt2model, while `our` is one of our models.
dst2src=True loads parameters from our models into huggingface's.
Note: dst2src=True is still untested.
|
move_weights
|
python
|
THUDM/GLM
|
utils.py
|
https://github.com/THUDM/GLM/blob/master/utils.py
|
MIT
|
def split_ds(ds, split=None, shuffle=True, save_splits=None, load_splits=None):
"""
Split a dataset into subsets given proportions of how
    much to allocate per split. If a split is 0%, returns None for that split.
Purpose: Useful for creating train/val/test splits
Arguments:
ds (Dataset or array-like): Data to be split.
        split (1D array-like): proportions to split `ds`; `sum(split)` must not be 0.
shuffle (boolean): Randomly split dataset. Default: True
save_splits: save split indices to file
load_splits: load split indices from file
"""
if split is None:
split = [.8, .2, .0]
split_sum = sum(split)
if split_sum == 0:
raise Exception('Split cannot sum to 0.')
split = np.array(split)
split /= split_sum
ds_len = len(ds)
inds = np.arange(ds_len)
if shuffle:
rng = np.random.RandomState(1234)
rng.shuffle(inds)
if load_splits is not None:
inds = np.load(load_splits)
assert len(inds) == ds_len
print_rank_0(f"Load split indices from {load_splits}")
elif save_splits is not None:
if torch.distributed.get_rank() == 0:
np.save(save_splits, inds)
print(f"Save split indices to {save_splits}")
start_idx = 0
residual_idx = 0
rtn_ds = [None] * len(split)
for i, f in enumerate(split):
if f != 0:
proportion = ds_len * split[i]
residual_idx += proportion % 1
split_ = int(int(proportion) + residual_idx)
split_inds = inds[start_idx:start_idx + max(split_, 1)]
rtn_ds[i] = SplitDataset(ds, split_inds)
start_idx += split_
residual_idx %= 1
return rtn_ds
|
Split a dataset into subsets given proportions of how
much to allocate per split. If a split is 0%, returns None for that split.
Purpose: Useful for creating train/val/test splits
Arguments:
ds (Dataset or array-like): Data to be split.
split (1D array-like): proportions to split `ds`. `sum(splits) != 0`
shuffle (boolean): Randomly split dataset. Default: True
save_splits: save split indices to file
load_splits: load split indices from file
|
split_ds
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
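A quick sketch of how the proportional split behaves (assuming `ds` supports len() and integer indexing, as SplitDataset requires; the fractional-remainder carry in `residual_idx` keeps the subset sizes summing to len(ds), and the fixed seed 1234 makes the shuffle reproducible):

train_ds, val_ds, test_ds = split_ds(ds, split=[0.8, 0.1, 0.1])
# a zero proportion yields None for that subset:
train_ds, val_ds, test_ds = split_ds(ds, split=[0.9, 0.1, 0.0])
assert test_ds is None
|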
def __getitem__(self, index):
"""process+tokenize string and return string,label,and stringlen"""
x = self.X[index]
if self.tokenizer is not None:
x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
elif self.preprocess_fn is not None:
x = self.preprocess_fn(x)
y = self.Y[index]
if isinstance(y, str):
if self.tokenizer is not None:
y = self.tokenizer.EncodeAsIds(y, self.preprocess_fn)
elif self.preprocess_fn is not None:
y = self.preprocess_fn(y)
return {'text': x, 'length': len(x), 'label': y}
|
process + tokenize a string and return the string, its label, and its length
|
__getitem__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def write(self, writer_gen=None, path=None, skip_header=False):
"""
    given a generator of metrics for each of the data points X_i,
    write the metrics, text, and labels to a CSV file
"""
if path is None:
path = self.path + '.results'
print('generating csv at ' + path)
with open(path, 'w') as csvfile:
c = csv.writer(csvfile, delimiter=self.delim)
if writer_gen is not None:
            # if the first item of the generator is a header describing the metrics, write it as the csv header row
if not skip_header:
header = (self.label_key,) + tuple(next(writer_gen)) + (self.text_key,)
c.writerow(header)
for i, row in enumerate(writer_gen):
row = (self.Y[i],) + tuple(row) + (self.X[i],)
c.writerow(row)
else:
c.writerow([self.label_key, self.text_key])
for row in zip(self.Y, self.X):
c.writerow(row)
|
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a CSV file
|
write
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def __getitem__(self, index):
"""gets the index'th string from the dataset"""
x = self.X[index]
if self.tokenizer is not None:
x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
elif self.preprocess_fn is not None:
x = self.preprocess_fn(x)
y = self.Y[index]
if isinstance(y, str):
if self.tokenizer is not None:
y = self.tokenizer.EncodeAsIds(y, self.preprocess_fn)
elif self.preprocess_fn is not None:
y = self.preprocess_fn(y)
return {'text': x, 'length': len(x), 'label': y}
|
gets the index-th string from the dataset
|
__getitem__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def write(self, writer_gen=None, path=None, skip_header=False):
"""
    given a generator of metrics for each of the data points X_i,
    write the metrics, text, and labels to a JSON file
"""
if path is None:
path = self.path + '.results'
jsons = []
if writer_gen is not None:
        # if the first item of the generator is a header describing the metrics, use it to name the JSON keys
def gen_helper():
keys = {}
keys[0] = self.label_key
if not skip_header:
for idx, k in enumerate(tuple(next(writer_gen))):
keys[idx + 1] = k
for i, row in enumerate(writer_gen):
if i == 0 and skip_header:
for idx, _ in enumerate(row):
keys[idx + 1] = 'metric_%d' % (idx,)
j = {}
for idx, v in enumerate((self.Y[i],) + tuple(row)):
k = keys[idx]
j[k] = v
yield j
else:
def gen_helper():
for y in self.Y:
j = {}
j[self.label_key] = y
yield j
def out_stream():
for i, j in enumerate(gen_helper()):
j[self.text_key] = self.X[i]
yield j
self.save_json_stream(path, out_stream())
|
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a JSON file
|
write
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def __init__(self, ds, tokenizer,
max_seq_len=1024,
sample_across_doc=True,
non_sentence_start=0.0, filter_english=False, **kwargs):
"""
sentence_start: the stripped article must start with a complete sentence
"""
self.ds = ds
self.ds_len = len(self.ds)
self.num_samples = 1000 * self.ds_len
self.max_seq_len = max_seq_len
self.tokenizer = tokenizer
self.sample_across_doc = sample_across_doc
self.non_sentence_start = non_sentence_start
self.filter_english = filter_english
self.weighting, self.total_len = None, None
self.is_lazy = False
if self.filter_english:
import fasttext
self.model = fasttext.load_model('/mnt/lid.176.bin')
print_rank_0("Load language detection model")
if hasattr(self.ds, 'is_lazy') and self.ds.is_lazy:
self.is_lazy = True
self.init_weighting()
|
sentence_start: the stripped article must start with a complete sentence
|
__init__
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def sentence_tokenize(self, sent, sentence_num=0, beginning=False, ending=False):
"""tokenize sentence and get token types"""
tokens = self.tokenizer.EncodeAsIds(sent).tokenization
str_type = 'str' + str(sentence_num)
token_types = [self.tokenizer.get_type(str_type).Id] * len(tokens)
return tokens, token_types
|
tokenize sentence and get token types
|
sentence_tokenize
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def get_doc(self, idx):
"""gets text of document corresponding to idx"""
rtn = self.ds[idx]
if isinstance(rtn, dict):
rtn = rtn['text']
return rtn
|
gets text of document corresponding to idx
|
get_doc
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def create_random_sentencepair(self, target_seq_length, rng, np_rng):
"""
fetches a random sentencepair corresponding to rng state similar to
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294
"""
is_random_next = None
curr_strs = []
curr_str_types = []
curr_len = 0
while curr_len < 1:
curr_len = 0
doc_a = None
while doc_a is None:
if self.weighted:
# doc_a_idx = np_rng.choice(self.ds_len, p=self.weighting)
doc_a_idx = self.get_weighted_samples(np_rng)
else:
doc_a_idx = rng.randint(0, self.ds_len - 1)
doc_a = self.sentence_split(self.get_doc(doc_a_idx))
if not doc_a:
doc_a = None
random_start_a = rng.randint(0, len(doc_a) - 1)
while random_start_a < len(doc_a):
sentence = doc_a[random_start_a]
            sentence, sentence_types = self.sentence_tokenize(sentence, 0, random_start_a == 0,
                                                              random_start_a == len(doc_a) - 1)
curr_strs.append(sentence)
curr_str_types.append(sentence_types)
curr_len += len(sentence)
if random_start_a == len(doc_a) - 1 or curr_len >= target_seq_length:
break
random_start_a = (random_start_a + 1)
if curr_strs:
num_a = 1
if len(curr_strs) >= 2:
num_a = rng.randint(0, len(curr_strs))
tokens_a = []
token_types_a = []
for j in range(num_a):
tokens_a.extend(curr_strs[j])
token_types_a.extend(curr_str_types[j])
tokens_b = []
token_types_b = []
is_random_next = False
if len(curr_strs) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
b_len = 0
while b_len < 1:
doc_b = None
while doc_b is None:
doc_b_idx = rng.randint(0, self.ds_len - 2)
doc_b_idx += int(doc_b_idx >= doc_a_idx)
doc_b = self.sentence_split(self.get_doc(doc_b_idx))
if not doc_b:
doc_b = None
random_start_b = rng.randint(0, len(doc_b) - 1)
while random_start_b < len(doc_b):
sentence_b = doc_b[random_start_b]
                        new_b_tokens, new_b_types = self.sentence_tokenize(sentence_b, 1, random_start_b == 0,
                                                                           random_start_b == len(doc_b) - 1)
b_len += len(new_b_tokens)
tokens_b.extend(new_b_tokens)
token_types_b.extend(new_b_types)
if len(tokens_b) >= target_b_length:
break
random_start_b = (random_start_b + 1)
else:
is_random_next = False
for j in range(num_a, len(curr_strs)):
tokens_b.extend(curr_strs[j])
token_types_b.extend(curr_str_types[j])
return (tokens_a, token_types_a), (tokens_b, token_types_b), is_random_next
|
fetches a random sentencepair corresponding to rng state similar to
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294
|
create_random_sentencepair
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def truncate_seq_pair(self, a, b, max_seq_len, rng):
"""
Truncate sequence pair according to original BERT implementation:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L391
"""
tokens_a, token_types_a = a
tokens_b, token_types_b = b
max_num_tokens = max_seq_len - 3
while True:
len_a = len(tokens_a)
len_b = len(tokens_b)
total_length = len_a + len_b
if total_length <= max_num_tokens:
break
        if len_a > len_b:
trunc_tokens = tokens_a
trunc_types = token_types_a
else:
trunc_tokens = tokens_b
trunc_types = token_types_b
assert len(trunc_tokens) >= 1
if rng.random() < 0.5:
trunc_tokens.pop(0)
trunc_types.pop(0)
else:
trunc_tokens.pop()
trunc_types.pop()
return (tokens_a, token_types_a), (tokens_b, token_types_b)
|
Truncate sequence pair according to original BERT implementation:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L391
|
truncate_seq_pair
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
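A toy run of the truncation (assuming `dataset` is an instance of the class defining truncate_seq_pair; max_seq_len - 3 reserves room for the [CLS] and two [SEP] tokens added later):

import random

tokens_a, types_a = list(range(10)), [0] * 10
tokens_b, types_b = list(range(8)), [1] * 8
rng = random.Random(0)
(a, ta), (b, tb) = dataset.truncate_seq_pair((tokens_a, types_a), (tokens_b, types_b), 16, rng)
assert len(a) + len(b) == 16 - 3  # one token popped per pass, front or back at random, from the longer side
|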
def mask_token(self, idx, tokens, types, vocab_words, rng):
"""
helper function to mask `idx` token from `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
"""
    label = tokens[idx]
    if rng.random() < 0.8:
        # 80% of the time, replace the token with [MASK]
        new_label = self.tokenizer.get_command('MASK').Id
    else:
        if rng.random() < 0.5:
            # 10% of the time, keep the original token
            new_label = label
        else:
            # 10% of the time, substitute a random vocabulary word
            new_label = rng.choice(vocab_words)
    tokens[idx] = new_label
    return label
|
helper function to mask `idx` token from `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
|
mask_token
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
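A self-contained restatement of the same 80/10/10 rule with plain integers (mask_id and vocab_words here are hypothetical stand-ins, useful for checking the distribution without a tokenizer):

import random

def mask_token_demo(idx, tokens, mask_id, vocab_words, rng):
    label = tokens[idx]
    if rng.random() < 0.8:
        tokens[idx] = mask_id                  # 80%: replace with [MASK]
    elif rng.random() < 0.5:
        pass                                   # 10%: keep the original token
    else:
        tokens[idx] = rng.choice(vocab_words)  # 10%: random vocabulary word
    return label

rng = random.Random(0)
tokens = [7, 8, 9]
original = mask_token_demo(1, tokens, mask_id=0, vocab_words=[1, 2, 3], rng=rng)
|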
def pad_seq(self, seq):
"""helper function to pad sequence pair"""
num_pad = max(0, self.max_seq_len - len(seq))
pad_mask = [0] * len(seq) + [1] * num_pad
seq += [self.tokenizer.get_command('pad').Id] * num_pad
return seq, pad_mask
|
helper function to pad a sequence to max_seq_len and return it with its pad mask
|
pad_seq
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
def create_masked_lm_predictions(self, a, b, mask_lm_prob, max_preds_per_seq, vocab_words, rng):
"""
Mask sequence pair for BERT training according to:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L338
"""
tokens_a, token_types_a = a
tokens_b, token_types_b = b
tokens = [self.tokenizer.get_command('ENC').Id] + tokens_a + [
self.tokenizer.get_command('sep').Id] + tokens_b + [self.tokenizer.get_command('sep').Id]
token_types = [token_types_a[0]] + token_types_a + [token_types_a[0]] + token_types_b + [token_types_b[0]]
len_a = len(tokens_a)
len_b = len(tokens_b)
cand_indices = [idx + 1 for idx in range(len_a)] + [idx + 2 + len_a for idx in range(len_b)]
rng.shuffle(cand_indices)
output_tokens, pad_mask = self.pad_seq(list(tokens))
output_types, _ = self.pad_seq(list(token_types))
num_to_predict = min(max_preds_per_seq, max(1, int(round(len(tokens) * mask_lm_prob))))
mask = [0] * len(output_tokens)
mask_labels = [-1] * len(output_tokens)
for idx in sorted(cand_indices[:num_to_predict]):
mask[idx] = 1
label = self.mask_token(idx, output_tokens, output_types, vocab_words, rng)
mask_labels[idx] = label
return (output_tokens, output_types), mask, mask_labels, pad_mask
|
Mask sequence pair for BERT training according to:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L338
|
create_masked_lm_predictions
|
python
|
THUDM/GLM
|
data_utils/datasets.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/datasets.py
|
MIT
|
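For intuition on the masking budget above: num_to_predict = min(max_preds_per_seq, max(1, round(len(tokens) * mask_lm_prob))), so with mask_lm_prob = 0.15 (BERT's usual value), max_preds_per_seq = 20 and a 128-token joined pair, round(128 * 0.15) = 19 positions are masked; the max_preds_per_seq cap only binds for longer inputs.
|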
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
|
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
|
filename_to_url
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
|
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
|
cached_path
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
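Usage follows a "URL or local path, same call" pattern (the URL below is a hypothetical example; PYTORCH_PRETRAINED_BERT_CACHE supplies the default cache directory):

local_file = cached_path('https://example.com/models/vocab.txt')  # downloaded and cached
local_file = cached_path('/data/vocab.txt')                       # returned as-is if the file exists
|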
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
|
Split a full s3 path into the bucket name and path.
|
split_s3_path
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
|
Wrapper function for s3 requests in order to create more helpful error
messages.
|
s3_request
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
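A sketch of how the decorator is applied to the s3 helpers in this module (the boto3 body below is an assumption about those helpers, not a quote of them):

import boto3

@s3_request
def s3_etag(url):
    """Check the ETag on an S3 object."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    return s3_resource.Object(bucket_name, s3_path).e_tag
|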
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
|
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
|
get_from_cache
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
|
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
|
read_set_from_file
|
python
|
THUDM/GLM
|
data_utils/file_utils.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/file_utils.py
|
MIT
|
def exists_lazy(path, data_type='data'):
"""
Check if we've already made a lazy version of this file for the `data_type` field.
"""
if not os.path.exists(get_lazy_path(path)):
return False
contents = os.listdir(get_lazy_path(path))
if data_type not in contents:
return False
if data_type + '.len.pkl' not in contents:
return False
return True
|
Check if we've already made a lazy version of this file for the `data_type` field.
|
exists_lazy
|
python
|
THUDM/GLM
|
data_utils/lazy_loader.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/lazy_loader.py
|
MIT
|
def SetTokenizer(self, tokenizer):
"""
    logic to set or remove (set to None) the tokenizer.
    combines preprocessing and tokenization into one callable.
"""
if tokenizer is None:
if not hasattr(self, '_tokenizer'):
self._tokenizer = tokenizer
else:
self._tokenizer = tokenizer
self.map_fn = ProcessorTokenizer(tokenizer, self.process_fn)
|
logic to set or remove (set to None) the tokenizer.
combines preprocessing and tokenization into one callable.
|
SetTokenizer
|
python
|
THUDM/GLM
|
data_utils/lazy_loader.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/lazy_loader.py
|
MIT
|
def __getitem__(self, index):
"""
    read the file and slice out strings based on the string-ending offset array `self.ends`
"""
if not isinstance(index, slice):
if index == 0:
start = 0
else:
start = self.ends[index - 1]
end = self.ends[index]
rtn = self.file_read(start, end)
if self.map_fn is not None:
rtn = self.map_fn(rtn)
else:
        # if given a slice, fetch the strings with one disk read and then split them in memory
chr_lens = self.ends[index]
if index.start == 0 or index.start is None:
start = 0
else:
start = self.ends[index.start - 1]
stop = chr_lens[-1]
strings = self.file_read(start, stop)
rtn = split_strings(strings, start, chr_lens)
if self.map_fn is not None:
rtn = [self.map_fn(s) for s in rtn]
return rtn
|
read the file and slice out strings based on the string-ending offset array `self.ends`
|
__getitem__
|
python
|
THUDM/GLM
|
data_utils/lazy_loader.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/lazy_loader.py
|
MIT
|
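The `ends` array holds cumulative character offsets, so item i spans ends[i-1]..ends[i]. A flat in-memory illustration of the same arithmetic (the class reads these ranges from disk instead):

data = "hellobig!world"
ends = [5, 9, 14]
# index 1 -> start = ends[0] = 5, end = ends[1] = 9
assert data[ends[0]:ends[1]] == "big!"
|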
def _batch(self, batch):
"""extracts samples only pertaining to this worker's batch"""
start = self.rank*self.batch_size//self.world_size
end = (self.rank+1)*self.batch_size//self.world_size
return batch[start:end]
|
extracts only the samples belonging to this worker's portion of the batch
|
_batch
|
python
|
THUDM/GLM
|
data_utils/samplers.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/samplers.py
|
MIT
|
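The integer arithmetic partitions the batch without gaps or overlap even when batch_size is not divisible by world_size, for example:

batch_size, world_size = 10, 4
bounds = [(r * batch_size // world_size, (r + 1) * batch_size // world_size) for r in range(world_size)]
# bounds == [(0, 2), (2, 5), (5, 7), (7, 10)] -- every sample is assigned to exactly one rank
|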
def data_iterator(self, _iter, wrap_around=False):
"""iterates through data and handles wrap around"""
for i, idx in enumerate(_iter):
if i < self.wrap_around%self.batch_size:
continue
if wrap_around:
self.wrap_around += 1
self.wrap_around %= self.batch_size
yield idx
|
iterates through the data and handles wrap-around
|
data_iterator
|
python
|
THUDM/GLM
|
data_utils/samplers.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/samplers.py
|
MIT
|
def make_tokenizer(tokenizer_type, corpus, model_path=None, vocab_size=None, model_type=None, pad_token=0,
character_coverage=1.0, command_tokens=None, type_tokens=None, fix_command_token=False, **kwargs):
"""
Helper function to instantiate a tokenizer given common combinations of options.
"""
tokenizer_class = tokenizer_type
    if isinstance(tokenizer_class, str):
        # resolve a class-name string to the tokenizer class defined in this module
        tokenizer_class = eval(tokenizer_class)
if tokenizer_class is BertWordPieceTokenizer:
return BertWordPieceTokenizer(model_type, **kwargs)
elif tokenizer_class is GPT2BPETokenizer:
if model_type is None:
model_type = 'gpt2'
return GPT2BPETokenizer(model_type, **kwargs)
elif tokenizer_class is ChineseSPTokenizer:
return ChineseSPTokenizer(fix_command_token=fix_command_token, **kwargs)
text_tokenizer = tokenizer_class(corpus=corpus, vocab_size=vocab_size, model_path=model_path, model_type=model_type,
pad_token=pad_token, character_coverage=character_coverage)
return Tokenizer(text_tokenizer, command_tokens, type_tokens)
|
Helper function to instantiate a tokenizer given common combinations of options.
|
make_tokenizer
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def EncodeAsIds(self, text, process_fn=None):
"""
encode text using text tokenizer and shift Id values for command tokens
"""
processed_text = text
if process_fn is not None:
processed_text = process_fn(processed_text)
def split_on_token(tok_extended: CommandToken, text):
result = []
tok = tok_extended.token
split_text = text.split(tok)
for i, sub_text in enumerate(split_text):
# CommandToken can control whitespace stripping around them.
# We use them for GPT2 and Roberta to have different behavior depending on the special token
# Cf. https://github.com/huggingface/transformers/pull/2778
# and https://github.com/huggingface/transformers/issues/3788
# Strip white spaces on the right
if tok_extended.rstrip and i > 0:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
sub_text = sub_text.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and i < len(split_text) - 1:
sub_text = sub_text.rstrip() # Opposite here
if i == 0 and not sub_text:
result.append(tok)
elif i == len(split_text) - 1:
if sub_text:
result.append(sub_text)
else:
pass
else:
if sub_text:
result.append(sub_text)
result.append(tok)
return result
def split_on_tokens(tok_list, text):
if not text.strip():
return []
if not tok_list:
return self.text_tokenizer.encode(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self._command_token_tokens:
tokenized_text.extend(split_on_token(tok, sub_text))
else:
tokenized_text.append(sub_text)
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._encode(token) if token not in self._command_token_tokens else [
self.command_token_map[token].Id] for token in tokenized_text
)
)
)
no_split_tokens = self._command_tokens
Ids = split_on_tokens(no_split_tokens, processed_text)
tokenization = Tokenization(Ids, processed_text, text)
tokenization.set_command_tokens(self._command_tokens)
return tokenization
|
encode text using text tokenizer and shift Id values for command tokens
|
EncodeAsIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
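A toy trace of the special-token splitting (with a hypothetical command token "<mask>" and its strip flags disabled):

text = "fill <mask> here"
pieces = text.split("<mask>")  # ["fill ", " here"]
# split_on_token interleaves the token back in: ["fill ", "<mask>", " here"];
# command tokens then map straight to their Ids while the surrounding text
# goes through the underlying text tokenizer.
|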
def EncodeAsTokens(self, text, process_fn=None):
"""
encode text as tokens using text tokenizer
"""
tokenization = self.text_tokenizer.EncodeAsTokens(text, process_fn=process_fn)
tokenization.set_command_tokens(self._command_tokens)
return tokenization
|
encode text as tokens using text tokenizer
|
EncodeAsTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def IdToToken(self, Id, type_token=False):
"""convert Id to token accounting for command and type tokens"""
if isinstance(Id, (TypeToken, CommandToken)):
return Id.token
if type_token:
return self.type_id_map[Id].token
if Id < self.num_command_tokens:
return self.command_id_map[Id].token
return self.text_tokenizer.IdToToken(Id - self.num_command_tokens)
|
convert Id to token accounting for command and type tokens
|
IdToToken
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def TokenToId(self, token, type_token=False):
"""convert token to Id accounting for command and type tokens"""
if isinstance(token, (TypeToken, CommandToken)):
return token.Id
if type_token:
return self.type_token_map[token].Id
if token in self.command_token_map:
return self.command_token_map[token].Id
return self.text_tokenizer.TokenToId(token) + self.num_command_tokens
|
convert token to Id accounting for command and type tokens
|
TokenToId
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
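The two methods apply and undo the same offset: command tokens own Ids [0, num_command_tokens), and text-token Ids are shifted up past them. A numeric sketch (num_command_tokens = 5 is hypothetical):

num_command_tokens = 5
raw_text_id = 42
public_id = raw_text_id + num_command_tokens          # 47, as produced by TokenToId
assert public_id - num_command_tokens == raw_text_id  # undone by IdToToken / DecodeIds
|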
def DecodeIds(self, Ids, type_token=False):
"""
    convert Ids to tokens, accounting for command and type tokens; the
    tokens are joined and returned as a string.
"""
if type_token:
return ' '.join(Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token for Id in Ids)
rtn_strs = []
current_str = []
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
for Id in Ids:
if isinstance(Id, CommandToken):
rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
current_str = []
rtn_strs.append(Id.token)
elif Id < self.num_command_tokens:
rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
current_str = []
rtn_strs.append(self.command_id_map[Id].token)
else:
current_str.append(Id - self.num_command_tokens)
if current_str != []:
rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
return ' '.join(rtn_strs)
|
convert Ids to tokens, accounting for command and type tokens; the
tokens are joined and returned as a string.
|
DecodeIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens, type_token=False):
"""
convert tokens to a string accounting for command and type tokens.
"""
if type_token:
return ' '.join(t.token if isinstance(t, TypeToken) else t for t in Tokens)
rtn_strs = []
current_str = []
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
for t in Tokens:
if isinstance(t, CommandToken):
rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
current_str = []
rtn_strs.append(t.token)
elif t in self.command_token_map:
rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
current_str = []
rtn_strs.append(t)
else:
current_str.append(t)
if current_str != []:
rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
return ' '.join(rtn_strs)
|
convert tokens to a string accounting for command and type tokens.
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeIds(self, Ids):
"""converts ascii ids to tokens before joining them into text"""
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
return ''.join([self.IdToToken(tok) for tok in Ids])
|
converts ASCII ids to tokens before joining them into text
|
DecodeIds
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|
def DecodeTokens(self, Tokens):
"""just concatenates ascii tokens into text"""
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
return ''.join(Tokens)
|
just concatenates ASCII tokens into text
|
DecodeTokens
|
python
|
THUDM/GLM
|
data_utils/tokenization.py
|
https://github.com/THUDM/GLM/blob/master/data_utils/tokenization.py
|
MIT
|