code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def test_tts_convert_with_voice_settings() -> None:
"""Test TTS with custom voice settings."""
client = ElevenLabs()
audio_generator = client.text_to_speech.convert(
text=DEFAULT_TEXT,
voice_id=DEFAULT_VOICE,
model_id=DEFAULT_MODEL,
voice_settings=VoiceSettings(stability=0.71, similarity_boost=0.5, style=0.0, use_speaker_boost=True),
)
audio = b"".join(audio_generator)
assert isinstance(audio, bytes), "TTS with voice settings should return bytes"
if not IN_GITHUB:
play(audio) | Test TTS with custom voice settings. | test_tts_convert_with_voice_settings | python | elevenlabs/elevenlabs-python | tests/test_tts.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_tts.py | MIT |
def test_voice_preview_generation():
"""Test generating voice previews from description."""
client = ElevenLabs()
# Test parameters
description = "A warm and friendly female voice with a slight British accent, speaking clearly and professionally"
sample_text = "This is a test message that needs to be at least one hundred characters long to meet the API requirements. Here it is."
previews = client.text_to_voice.create_previews(voice_description=description, text=sample_text)
assert hasattr(previews, "previews"), "Response should have 'previews' attribute"
assert len(previews.previews) > 0, "Should receive at least one preview"
assert hasattr(previews.previews[0], "generated_voice_id"), "Preview should contain generated_voice_id"
assert hasattr(previews.previews[0], "audio_base_64"), "Preview should contain audio_base_64" | Test generating voice previews from description. | test_voice_preview_generation | python | elevenlabs/elevenlabs-python | tests/test_ttv.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_ttv.py | MIT |
def test_construct_event_valid_signature():
"""Test webhook event construction with valid signature."""
# Setup
client = ElevenLabs()
webhook_secret = "test_secret"
payload = {"event_type": "speech.completed", "id": "123456"}
# Create a valid signature
body = json.dumps(payload)
timestamp = str(int(time.time()))
message = f"{timestamp}.{body}"
signature = "v0=" + hmac.new(
webhook_secret.encode('utf-8'),
message.encode('utf-8'),
hashlib.sha256
).hexdigest()
sig_header = f"t={timestamp},{signature}"
# Verify event construction
event = client.webhooks.construct_event(body, sig_header, webhook_secret)
assert event == payload, "Event should match the original payload" | Test webhook event construction with valid signature. | test_construct_event_valid_signature | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def test_construct_event_missing_signature():
"""Test webhook event construction with missing signature header."""
client = ElevenLabs()
webhook_secret = "test_secret"
payload = {"event_type": "speech.completed", "id": "123456"}
with pytest.raises(BadRequestError) as excinfo:
client.webhooks.construct_event(payload, "", webhook_secret)
assert "Missing signature header" in str(excinfo.value) | Test webhook event construction with missing signature header. | test_construct_event_missing_signature | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def test_construct_event_invalid_signature_format():
"""Test webhook event construction with invalid signature format."""
client = ElevenLabs()
webhook_secret = "test_secret"
payload = {"event_type": "speech.completed", "id": "123456"}
body = json.dumps(payload)
sig_header = "invalid_format"
with pytest.raises(BadRequestError) as excinfo:
client.webhooks.construct_event(body, sig_header, webhook_secret)
assert "No signature hash found with expected scheme v0" in str(excinfo.value) | Test webhook event construction with invalid signature format. | test_construct_event_invalid_signature_format | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def test_construct_event_expired_timestamp():
"""Test webhook event construction with expired timestamp."""
client = ElevenLabs()
webhook_secret = "test_secret"
payload = {"event_type": "speech.completed", "id": "123456"}
# Create an expired timestamp (31 minutes old)
expired_time = int(time.time()) - 31 * 60
timestamp = str(expired_time)
body = json.dumps(payload)
message = f"{timestamp}.{body}"
signature = "v0=" + hmac.new(
webhook_secret.encode('utf-8'),
message.encode('utf-8'),
hashlib.sha256
).hexdigest()
sig_header = f"t={timestamp},{signature}"
with pytest.raises(BadRequestError) as excinfo:
client.webhooks.construct_event(body, sig_header, webhook_secret)
assert "Timestamp outside the tolerance zone" in str(excinfo.value) | Test webhook event construction with expired timestamp. | test_construct_event_expired_timestamp | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def test_construct_event_invalid_signature():
"""Test webhook event construction with invalid signature."""
client = ElevenLabs()
webhook_secret = "test_secret"
payload = {"event_type": "speech.completed", "id": "123456"}
body = json.dumps(payload)
timestamp = str(int(time.time()))
sig_header = f"t={timestamp},v0=invalid_signature"
with pytest.raises(BadRequestError) as excinfo:
client.webhooks.construct_event(body, sig_header, webhook_secret)
assert "Signature hash does not match" in str(excinfo.value) | Test webhook event construction with invalid signature. | test_construct_event_invalid_signature | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def test_construct_event_missing_secret():
"""Test webhook event construction with missing secret."""
client = ElevenLabs()
payload = {"event_type": "speech.completed", "id": "123456"}
body = json.dumps(payload)
timestamp = str(int(time.time()))
sig_header = f"t={timestamp},v0=some_signature"
with pytest.raises(BadRequestError) as excinfo:
client.webhooks.construct_event(body, sig_header, "")
assert "Webhook secret not configured" in str(excinfo.value) | Test webhook event construction with missing secret. | test_construct_event_missing_secret | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def test_construct_event_mocked_time(mock_time):
"""Test webhook event construction with mocked time."""
mock_time.return_value = 1600000000
client = ElevenLabs()
webhook_secret = "test_secret"
payload = {"event_type": "speech.completed", "id": "123456"}
# Create a valid signature with fixed timestamp
body = json.dumps(payload)
timestamp = "1600000000"
message = f"{timestamp}.{body}"
signature = "v0=" + hmac.new(
webhook_secret.encode('utf-8'),
message.encode('utf-8'),
hashlib.sha256
).hexdigest()
sig_header = f"t={timestamp},{signature}"
# Verify event construction
event = client.webhooks.construct_event(body, sig_header, webhook_secret)
assert event == payload, "Event should match the original payload" | Test webhook event construction with mocked time. | test_construct_event_mocked_time | python | elevenlabs/elevenlabs-python | tests/test_webhooks.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/test_webhooks.py | MIT |
def as_local_files(urls: Sequence[str]) -> Generator[str, None, None]:
"""Util to download files from urls and return local file paths"""
temp_files = []
for url in urls:
response = httpx.get(url)
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(response.content)
temp_files.append(temp_file)
yield temp_file.name
# Remove the files
for temp_file in temp_files:
temp_file.close() | Util to download files from urls and return local file paths | as_local_files | python | elevenlabs/elevenlabs-python | tests/utils.py | https://github.com/elevenlabs/elevenlabs-python/blob/master/tests/utils.py | MIT |
def create_oss_artifact(
path,
bucket=None,
accesskey_id=None,
accesskey_secret=None,
key=None,
endpoint=None,
is_global=False,
):
"""
Configure the object as OssArtifact
OSS configuration can be found
(https://www.alibabacloud.com/help/doc-detail/32027.htm)
:param path: the local path of container
:param bucket: oss bucket
:param accesskey_id: oss user id
:param accesskey_secret: oss user ky
:param key: key of oss object
:param endpoint: end point of oss
:return:
"""
return OssArtifact(
path,
accesskey_id,
accesskey_secret,
bucket,
key=key,
endpoint=endpoint,
is_global=is_global,
) |
Configure the object as OssArtifact
OSS configuration can be found
(https://www.alibabacloud.com/help/doc-detail/32027.htm)
:param path: the local path of container
:param bucket: oss bucket
:param accesskey_id: oss user id
:param accesskey_secret: oss user ky
:param key: key of oss object
:param endpoint: end point of oss
:return:
| create_oss_artifact | python | couler-proj/couler | couler/argo.py | https://github.com/couler-proj/couler/blob/master/couler/argo.py | Apache-2.0 |
def create_s3_artifact(
path,
bucket=None,
accesskey_id=None,
accesskey_secret=None,
key=None,
endpoint=None,
is_global=False,
insecure=False,
):
"""
Configure the object as S3Artifact
:param path: the local path of container
:param bucket: s3 bucket
:param accesskey_id: s3 user id
:param accesskey_secret: s3 user key
:param key: key of s3 object
:param endpoint: end point of s3
:param insecure: use HTTP instead of HTTPS when True
:return:
"""
return S3Artifact(
path,
accesskey_id,
accesskey_secret,
bucket,
key=key,
endpoint=endpoint,
is_global=is_global,
insecure=insecure,
) |
Configure the object as S3Artifact
:param path: the local path of container
:param bucket: s3 bucket
:param accesskey_id: s3 user id
:param accesskey_secret: s3 user key
:param key: key of s3 object
:param endpoint: end point of s3
:param insecure: use HTTP instead of HTTPS when True
:return:
| create_s3_artifact | python | couler-proj/couler | couler/argo.py | https://github.com/couler-proj/couler/blob/master/couler/argo.py | Apache-2.0 |
def obtain_secret(secret_keys, namespace="default", name=None, dry_run=False):
"""Get the key from secret in k8s, and return the secret name."""
secret_data = {}
for x in secret_keys:
secret_data[x] = ""
return create_secret(
secret_data,
namespace=namespace,
name=name,
dry_run=dry_run,
use_existing=True,
) | Get the key from secret in k8s, and return the secret name. | obtain_secret | python | couler-proj/couler | couler/argo.py | https://github.com/couler-proj/couler/blob/master/couler/argo.py | Apache-2.0 |
def create_secret(
secret_data,
namespace="default",
name=None,
dry_run=False,
use_existing=False,
artifact_secret=False,
):
"""Store the input dict as a secret in k8s, and return the secret name."""
secret = Secret(
namespace=namespace,
data=secret_data,
name=name,
dry_run=dry_run,
use_existing=use_existing,
artifact_secret=artifact_secret,
)
# avoid create the secret for same input dict
if secret.name not in states._secrets:
states._secrets[secret.name] = secret
return secret.name | Store the input dict as a secret in k8s, and return the secret name. | create_secret | python | couler-proj/couler | couler/argo.py | https://github.com/couler-proj/couler/blob/master/couler/argo.py | Apache-2.0 |
def check_name(name):
"""Check the name is valid or not"""
if len(name) > WorkflowCRD.NAME_MAX_LENGTH:
raise ValueError(
"Name is too long. Max length: {}, now: {}"
"".format(WorkflowCRD.NAME_MAX_LENGTH, len(name))
)
if "." in name:
raise ValueError("Name cannot include dot.")
if "_" in name:
raise ValueError("Name cannot include underscore.")
match_obj = re.match(WorkflowCRD.NAME_PATTERN, name)
if not match_obj:
raise ValueError(
"Name is invalid. Regex used for validation is %s"
% WorkflowCRD.NAME_PATTERN
) | Check the name is valid or not | check_name | python | couler-proj/couler | couler/argo_submitter.py | https://github.com/couler-proj/couler/blob/master/couler/argo_submitter.py | Apache-2.0 |
def config_defaults(name_salter=None, service_account: str = None):
"""
Config couler defaults.
:param name_salter: function to salt workflow names.
:param service_account: name of the default Kubernetes
ServiceAccount with which to run workflows
:return:
"""
if name_salter is not None:
states._workflow_name_salter = name_salter
if service_account is not None:
states.default_service_account = service_account |
Config couler defaults.
:param name_salter: function to salt workflow names.
:param service_account: name of the default Kubernetes
ServiceAccount with which to run workflows
:return:
| config_defaults | python | couler-proj/couler | couler/core/config.py | https://github.com/couler-proj/couler/blob/master/couler/core/config.py | Apache-2.0 |
def config_workflow(
name=None,
user_id=None,
timeout=None,
time_to_clean=None,
cluster_config_file=None,
cron_config=None,
service_account=None,
):
"""
Config some workflow-level information.
:param name: name of the workflow.
:param user_id: user information.
:param timeout: maximum running time(seconds).
:param time_to_clean: time to keep the workflow after completed(seconds).
:param cluster_config_file: cluster specific config
:param cron_config: for cron scheduling
:param service_account: name of the Kubernetes ServiceAccount which
runs this workflow
:return:
"""
if name is not None:
states.workflow.name = states._workflow_name_salter(name)
if user_id is not None:
states.workflow.user_id = user_id
if timeout is not None:
states.workflow.timeout = timeout
if time_to_clean is not None:
states.workflow.clean_ttl = time_to_clean
if cluster_config_file is not None:
import os
os.environ["couler_cluster_config"] = cluster_config_file
states.workflow.cluster_config = utils.load_cluster_config()
if cron_config is not None:
if isinstance(cron_config, OrderedDict):
raise SyntaxError("Cron config would be a dict")
if "schedule" not in cron_config:
raise SyntaxError("require the cron schedule")
schedule = cron_config["schedule"]
concurrency_policy = cron_config.get("concurrency_policy", "Allow")
successful_jobs_history_limit = cron_config.get(
"successful_jobs_history_limit", 3
)
failed_jobs_history_limit = cron_config.get(
"failed_jobs_history_limit", 1
)
starting_deadline_seconds = cron_config.get(
"starting_deadline_seconds", 10
)
suspend = cron_config.get("suspend", "false")
timezone = cron_config.get("timezone", "Asia/Shanghai")
_config_cron_workflow(
schedule,
concurrency_policy,
successful_jobs_history_limit,
failed_jobs_history_limit,
starting_deadline_seconds,
suspend,
timezone,
)
states.workflow.service_account = (
service_account
if service_account is not None
else states.default_service_account
) |
Config some workflow-level information.
:param name: name of the workflow.
:param user_id: user information.
:param timeout: maximum running time(seconds).
:param time_to_clean: time to keep the workflow after completed(seconds).
:param cluster_config_file: cluster specific config
:param cron_config: for cron scheduling
:param service_account: name of the Kubernetes ServiceAccount which
runs this workflow
:return:
| config_workflow | python | couler-proj/couler | couler/core/config.py | https://github.com/couler-proj/couler/blob/master/couler/core/config.py | Apache-2.0 |
def _config_cron_workflow(
schedule,
concurrency_policy='"Allow"',
successful_jobs_history_limit=3, # Default 3
failed_jobs_history_limit=1, # Default 1
starting_deadline_seconds=10,
suspend="false",
timezone="Asia/Shanghai",
):
"""
Config the CronWorkflow, see example
https://github.com/argoproj/argo/blob/master/docs/cron-workflows.md
"""
cron_config = {
"schedule": schedule,
"concurrencyPolicy": concurrency_policy,
"successfulJobsHistoryLimit": successful_jobs_history_limit,
"failedJobsHistoryLimit": failed_jobs_history_limit,
"startingDeadlineSeconds": starting_deadline_seconds,
"suspend": suspend,
"timezone": timezone,
}
states.workflow.config_cron_workflow(cron_config) |
Config the CronWorkflow, see example
https://github.com/argoproj/argo/blob/master/docs/cron-workflows.md
| _config_cron_workflow | python | couler-proj/couler | couler/core/config.py | https://github.com/couler-proj/couler/blob/master/couler/core/config.py | Apache-2.0 |
def run_script(
image,
command=None,
source=None,
args=None,
output=None,
input=None,
env=None,
secret=None,
resources=None,
timeout=None,
retry=None,
step_name=None,
image_pull_policy=None,
pool=None,
enable_ulogfs=True,
daemon=False,
volume_mounts=None,
working_dir=None,
node_selector=None,
cache=None,
):
"""
Generate an Argo script template. For example,
https://github.com/argoproj/argo/tree/master/examples#scripts--results.
:param image: Docker image name
:param command: entrypoint array
:param source: reference to function name that contains code to be executed
:param args: arguments to the step or task
:param output: output artifact for container output
:param input: input artifact for container input
:param env: environment variable
:param secret: secrets to mount as environment variables
:param resources: CPU or memory resource config dict
:param timeout: in seconds
:param retry: retry policy
:param step_name: used for annotating step .
:param image_pull_policy: one of ImagePullPolicy.[Always|Never|IfNotPresent] # noqa: E501
:param pool:
:param enable_ulogfs:
:param daemon:
:return:
"""
if source is None:
raise ValueError("Source must be provided")
func_name, caller_line = utils.invocation_location()
func_name = (
utils.argo_safe_name(step_name) if step_name is not None else func_name
)
if states.workflow.get_template(func_name) is None:
# Generate the inputs parameter for the template
if input is None:
input = []
if args is None and states._outputs_tmp is not None:
args = []
if args is not None:
if not isinstance(args, list):
args = [args]
# Handle case where args is a list of list type
# For example, [[Output, ]]
if (
isinstance(args, list)
and len(args) > 0
and isinstance(args[0], list)
and len(args[0]) > 0
and isinstance(args[0][0], Output)
):
args = args[0]
if states._outputs_tmp is not None:
args.extend(states._outputs_tmp)
# In case, the args include output artifact
# Place output artifact into the input
for arg in args:
if isinstance(arg, (OutputArtifact, OutputJob)):
input.append(arg)
# Automatically append emptyDir volume and volume mount to work with
# Argo k8sapi executor.
# More info: https://argoproj.github.io/argo/empty-dir/
if output is not None:
if not isinstance(output, list):
output = [output]
if volume_mounts is None:
volume_mounts = []
mounted_path = []
for i, out in enumerate(output):
path_to_mount = os.path.dirname(out.path)
# Avoid duplicate mount paths
if path_to_mount not in mounted_path:
volume_mounts.append(
VolumeMount("couler-out-dir-%s" % i, path_to_mount)
)
mounted_path.append(path_to_mount)
# Generate container and template
template = Script(
name=func_name,
image=image,
command=command,
source=source,
args=args,
env=env,
secret=states.get_secret(secret),
resources=resources,
image_pull_policy=image_pull_policy,
retry=retry,
timeout=timeout,
output=output,
input=input,
pool=pool,
enable_ulogfs=enable_ulogfs,
daemon=daemon,
volume_mounts=volume_mounts,
working_dir=working_dir,
node_selector=node_selector,
cache=cache,
)
states.workflow.add_template(template)
step_name = step_update_utils.update_step(
func_name, args, step_name, caller_line
)
# TODO: need to switch to use field `output` directly
step_templ = states.workflow.get_template(func_name)
_output = step_templ.to_dict().get("outputs", None)
_input = step_templ.to_dict().get("inputs", None)
rets = _script_output(step_name, func_name, _output)
states._steps_outputs[step_name] = rets
if proto_repr:
proto_repr.step_repr( # noqa: F841
step_name=step_name,
tmpl_name=func_name,
image=image,
command=command,
source=source,
script_output=rets,
args=args,
input=_input,
output=_output,
env=env,
resources=resources,
volume_mounts=volume_mounts,
cache=cache,
)
proto_repr.add_deps_to_step(step_name)
return rets |
Generate an Argo script template. For example,
https://github.com/argoproj/argo/tree/master/examples#scripts--results.
:param image: Docker image name
:param command: entrypoint array
:param source: reference to function name that contains code to be executed
:param args: arguments to the step or task
:param output: output artifact for container output
:param input: input artifact for container input
:param env: environment variable
:param secret: secrets to mount as environment variables
:param resources: CPU or memory resource config dict
:param timeout: in seconds
:param retry: retry policy
:param step_name: used for annotating step .
:param image_pull_policy: one of ImagePullPolicy.[Always|Never|IfNotPresent] # noqa: E501
:param pool:
:param enable_ulogfs:
:param daemon:
:return:
| run_script | python | couler-proj/couler | couler/core/run_templates.py | https://github.com/couler-proj/couler/blob/master/couler/core/run_templates.py | Apache-2.0 |
def run_container(
image,
command=None,
args=None,
output=None,
input=None,
env=None,
env_from=None,
secret=None,
resources=None,
timeout=None,
retry=None,
step_name=None,
image_pull_policy=None,
pool=None,
enable_ulogfs=True,
daemon=False,
volume_mounts=None,
working_dir=None,
node_selector=None,
cache=None,
parallelism=None,
):
"""
Generate an Argo container template. For example, the template whalesay
in https://github.com/argoproj/argo/tree/master/examples#hello-world.
:param image: Docker image name
:param command: entrypoint array
:param args: arguments to the step or task
:param output: output artifact for container output
:param input: input artifact for container input
:param env: environment variable
:param secret: secrets to mount as environment variables
:param resources: CPU or memory resource config dict
:param timeout: in seconds
:param retry: retry policy
:param step_name: used for annotating step .
:param image_pull_policy: one of ImagePullPolicy.[Always|Never|IfNotPresent] # noqa: E501
:param pool:
:param enable_ulogfs:
:param daemon:
:return:
"""
func_name, caller_line = utils.invocation_location()
func_name = (
utils.argo_safe_name(step_name) if step_name is not None else func_name
)
if states.workflow.get_template(func_name) is None:
# Generate the inputs parameter for the template
if input is None:
input = []
if args is None and states._outputs_tmp is not None:
args = []
if args is not None:
if not isinstance(args, list):
args = [args]
# Handle case where args is a list of list type
# For example, [[Output, ]]
if (
isinstance(args, list)
and len(args) > 0
and isinstance(args[0], list)
and len(args[0]) > 0
and isinstance(args[0][0], Output)
):
args = args[0]
if states._outputs_tmp is not None:
args.extend(states._outputs_tmp)
# In case, the args include output artifact
# Place output artifact into the input
for arg in args:
if isinstance(arg, (OutputArtifact, OutputJob)):
input.append(arg)
# Automatically append emptyDir volume and volume mount to work with
# Argo k8sapi executor.
# More info: https://argoproj.github.io/argo/empty-dir/
if output is not None:
if not isinstance(output, list):
output = [output]
if volume_mounts is None:
volume_mounts = []
mounted_path = []
for i, out in enumerate(output):
path_to_mount = os.path.dirname(out.path)
# Avoid duplicate mount paths
if path_to_mount not in mounted_path:
volume_mounts.append(
VolumeMount("couler-out-dir-%s" % i, path_to_mount)
)
mounted_path.append(path_to_mount)
# Generate container and template
template = Container(
name=func_name,
image=image,
command=command,
args=args,
env=env,
env_from=env_from,
secret=states.get_secret(secret),
resources=resources,
image_pull_policy=image_pull_policy,
retry=retry,
timeout=timeout,
output=output,
input=input,
pool=pool,
enable_ulogfs=enable_ulogfs,
daemon=daemon,
volume_mounts=volume_mounts,
working_dir=working_dir,
node_selector=node_selector,
cache=cache,
parallelism=parallelism,
)
states.workflow.add_template(template)
step_name = step_update_utils.update_step(
func_name, args, step_name, caller_line
)
# TODO: need to switch to use field `output` directly
step_templ = states.workflow.get_template(func_name)
_output = step_templ.to_dict().get("outputs", None)
_input = step_templ.to_dict().get("inputs", None)
rets = _container_output(step_name, func_name, _output)
states._steps_outputs[step_name] = rets
if proto_repr:
pb_step = proto_repr.step_repr( # noqa: F841
step_name=step_name,
tmpl_name=func_name,
image=image,
command=command,
source=None,
script_output=None,
args=args,
input=_input,
output=_output,
env=env,
resources=resources,
secret=states.get_secret(secret),
volume_mounts=volume_mounts,
cache=cache,
)
proto_repr.add_deps_to_step(step_name)
return rets |
Generate an Argo container template. For example, the template whalesay
in https://github.com/argoproj/argo/tree/master/examples#hello-world.
:param image: Docker image name
:param command: entrypoint array
:param args: arguments to the step or task
:param output: output artifact for container output
:param input: input artifact for container input
:param env: environment variable
:param secret: secrets to mount as environment variables
:param resources: CPU or memory resource config dict
:param timeout: in seconds
:param retry: retry policy
:param step_name: used for annotating step .
:param image_pull_policy: one of ImagePullPolicy.[Always|Never|IfNotPresent] # noqa: E501
:param pool:
:param enable_ulogfs:
:param daemon:
:return:
| run_container | python | couler-proj/couler | couler/core/run_templates.py | https://github.com/couler-proj/couler/blob/master/couler/core/run_templates.py | Apache-2.0 |
def _cleanup():
"""Cleanup the cached fields, just used for unit test.
"""
global _secrets, _update_steps_lock, _dag_caller_line, _upstream_dag_task, _upstream_dag_depends_logic, workflow, _steps_outputs # noqa: E501
global _exit_handler_enable, _when_prefix, _when_task, _while_steps, _concurrent_func_line # noqa: E501
_secrets = {}
_update_steps_lock = True
_dag_caller_line = None
_upstream_dag_task = None
_upstream_dag_depends_logic = None
_exit_handler_enable = False
_when_prefix = None
_when_task = None
_while_steps = OrderedDict()
_concurrent_func_line = -1
_steps_outputs = OrderedDict()
workflow.cleanup()
cleanup_proto_workflow() | Cleanup the cached fields, just used for unit test.
| _cleanup | python | couler-proj/couler | couler/core/states.py | https://github.com/couler-proj/couler/blob/master/couler/core/states.py | Apache-2.0 |
def _update_dag_tasks(
function_name,
caller_line,
dependencies,
depends_logic,
args=None,
template_name=None,
step_name=None,
):
"""
A task in DAG of Argo YAML contains name, related template and parameters.
Here we insert a single task into the global tasks.
"""
if step_name is None:
function_id = utils.invocation_name(function_name, caller_line)
else:
function_id = step_name
task_template = states.workflow.get_dag_task(function_id)
if task_template is None:
task_template = OrderedDict({"name": function_id})
if dependencies is not None and isinstance(dependencies, list):
if "dependencies" in task_template:
task_template["dependencies"].extend(dependencies)
else:
task_template["dependencies"] = dependencies
if depends_logic is not None:
task_template["depends"] = depends_logic
if template_name is None:
task_template["template"] = function_name
else:
task_template["template"] = template_name
# configure the args
if args is not None:
parameters, artifacts = _get_params_and_artifacts_from_args(
args, function_name, prefix="tasks"
)
if len(parameters) > 0:
task_template["arguments"] = OrderedDict()
task_template["arguments"]["parameters"] = parameters
if len(artifacts) > 0:
if "arguments" not in task_template:
task_template["arguments"] = OrderedDict()
task_template["arguments"]["artifacts"] = artifacts
else:
# step exist on the dag, thus, we update its dependency
if dependencies is not None:
if "dependencies" in task_template:
task_template["dependencies"].extend(dependencies)
else:
task_template["dependencies"] = [dependencies]
if depends_logic is not None:
task_template["depends"] = depends_logic
t_name = function_name if template_name is None else template_name
step = Step(name=function_id, template=t_name)
if states._exit_handler_enable:
if states._when_prefix is not None:
step.when = states._when_prefix
if function_id in states.workflow.exit_handler_step:
states.workflow.exit_handler_step.get(function_id).append(
step.to_dict()
)
else:
states.workflow.exit_handler_step[function_id] = [step.to_dict()]
elif states._when_prefix is not None:
step.when = states._when_prefix
if step.name not in states.workflow.dag_tasks.keys():
step_spec = step.to_dict()
step_spec["dependencies"] = [states._when_task]
states.workflow.dag_tasks[step.name] = step_spec
else:
states.workflow.update_dag_task(function_id, task_template)
# return the current task name
return function_id |
A task in DAG of Argo YAML contains name, related template and parameters.
Here we insert a single task into the global tasks.
| _update_dag_tasks | python | couler-proj/couler | couler/core/step_update_utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/step_update_utils.py | Apache-2.0 |
def _update_steps(function_name, caller_line, args=None, template_name=None):
"""
A step in Argo YAML contains name, related template and parameters.
Here we insert a single step into the global steps.
"""
function_id = utils.invocation_name(function_name, caller_line)
# Update `steps` only if needed
if states._update_steps_lock:
name = function_id
if states._run_concurrent_lock:
_id = utils.invocation_name(template_name, caller_line)
name = "%s-%s" % (_id, states._concurrent_func_id)
if states._sub_steps is not None:
states._concurrent_func_id = states._concurrent_func_id + 1
t_name = function_name if template_name is None else template_name
step = Step(name=name, template=t_name)
if states._when_prefix is not None:
step.when = states._when_prefix
if args is not None:
parameters, artifacts = _get_params_and_artifacts_from_args(
args,
template_name
if states._run_concurrent_lock
else function_name,
prefix="steps",
)
if len(parameters) > 0:
step.arguments = OrderedDict()
step.arguments["parameters"] = parameters
if len(artifacts) > 0:
if step.arguments is None:
step.arguments = OrderedDict()
step.arguments["artifacts"] = artifacts
if states._condition_id is not None:
function_id = states._condition_id
if states._while_lock:
if function_id in states._while_steps:
states._while_steps.get(function_id).append(step.to_dict())
else:
states._while_steps[function_id] = [step.to_dict()]
else:
if states._sub_steps is not None:
if function_id in states._sub_steps:
states._sub_steps.get(function_id).append(step.to_dict())
else:
states._sub_steps[function_id] = [step.to_dict()]
elif states._exit_handler_enable is True:
if function_id in states.workflow.exit_handler_step:
states.workflow.exit_handler_step.get(function_id).append(
step.to_dict()
)
else:
states.workflow.exit_handler_step[function_id] = [
step.to_dict()
]
else:
states.workflow.add_step(function_id, step)
return step.name
else:
return function_id |
A step in Argo YAML contains name, related template and parameters.
Here we insert a single step into the global steps.
| _update_steps | python | couler-proj/couler | couler/core/step_update_utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/step_update_utils.py | Apache-2.0 |
def argo_safe_name(name):
"""Some names are to be used in the Argo YAML file. For example,
the generateName and template name in
https://github.com/argoproj/argo/blob/master/examples/hello-world.yaml. As
Argo is to use the YAML as part of Kubernetes job description
YAML, these names must follow Kubernetes's convention -- no
period or underscore. This function replaces these prohibited
characters into dashes.
"""
if name is None:
return None
# Strip the unnecessary "<>" pattern that appears when the
# file is invoked via standard input, e.g. `cat run.py | python -u`,
# where `name` is "<stdin>".
if name.startswith("<") and name.endswith(">"):
name = name.strip("<|>")
# '_' and '.' are not allowed
return re.sub(r"_|\.", "-", name) | Some names are to be used in the Argo YAML file. For example,
the generateName and template name in
https://github.com/argoproj/argo/blob/master/examples/hello-world.yaml. As
Argo is to use the YAML as part of Kubernetes job description
YAML, these names must follow Kubernetes's convention -- no
period or underscore. This function replaces these prohibited
characters into dashes.
| argo_safe_name | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def invocation_location():
"""If a function A in file B calls function C, which in turn calls
invocation_location(), the call returns information about the invocation,
in particular, the caller's name "A" and the line number where A
calls C. Return (B + line_number) as function_name if A doesn't exist,
where users directly calls C in file B.
:return: a tuple of (function_name, invocation_line)
"""
stack = inspect.stack()
if len(stack) < 4:
line_number = stack[len(stack) - 1][2]
func_name = "%s-%d" % (
argo_safe_name(workflow_filename()),
line_number,
)
else:
func_name = argo_safe_name(stack[2][3])
line_number = stack[3][2]
# We need to strip the unnecessary "<>" pattern that appears when the
# function is invoked:
# 1. at module-level, e.g. `python -m module_name`, where `func_name`
# is "<module>".
# 2. via standard input, e.g. `cat run.py | python -u`, where `func_name`
# is "<stdin>".
if func_name.startswith("<") and func_name.endswith(">"):
func_name = "%s-%s" % (func_name.strip("<|>"), _get_uuid())
return func_name, line_number | If a function A in file B calls function C, which in turn calls
invocation_location(), the call returns information about the invocation,
in particular, the caller's name "A" and the line number where A
calls C. Return (B + line_number) as function_name if A doesn't exist,
where users directly calls C in file B.
:return: a tuple of (function_name, invocation_line)
| invocation_location | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def body(func_obj):
"""If a function A calls body(), the call returns the Python source code of
the function definition body (not including the signature) of A.
"""
if func_obj is None:
return None
code = inspect.getsource(func_obj)
# Remove function signature
code = code[code.find(":") + 1 :] # noqa: E203
# Function might be defined in some indented scope
# (e.g. in another function).
# We need to handle this and properly dedent the function source code
return textwrap.dedent(code) | If a function A calls body(), the call returns the Python source code of
the function definition body (not including the signature) of A.
| body | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def workflow_filename():
"""Return the Python file that defines the workflow.
"""
stacks = inspect.stack()
frame = inspect.stack()[len(stacks) - 1]
full_path = frame[0].f_code.co_filename
filename, _ = os.path.splitext(os.path.basename(full_path))
filename = argo_safe_name(filename)
return filename | Return the Python file that defines the workflow.
| workflow_filename | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def load_cluster_config():
"""Load user provided cluster specification file.
"""
module_file = os.getenv("couler_cluster_config")
if module_file is None:
return None
spec = util.spec_from_file_location(module_file, module_file)
module = util.module_from_spec(spec)
spec.loader.exec_module(module)
return module.cluster | Load user provided cluster specification file.
| load_cluster_config | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def encode_base64(s):
"""
Encode a string using base64 and return a binary string.
This function is used in Secret creation.
For example, the secrets for Argo YAML:
https://github.com/argoproj/argo/blob/master/examples/README.md#secrets
"""
bencode = base64.b64encode(s.encode("utf-8"))
return str(bencode, "utf-8") |
Encode a string using base64 and return a binary string.
This function is used in Secret creation.
For example, the secrets for Argo YAML:
https://github.com/argoproj/argo/blob/master/examples/README.md#secrets
| encode_base64 | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def generate_parameters_run_job(env):
"""
Generate the inputs parameter for running kubernetes resource
"""
envs = []
para_index = 0
parameters = []
args = []
if env is not None:
if isinstance(env, dict):
for key in env:
value = env[key]
# in case the env value contains the other steps
if key == "secrets":
if not isinstance(value, list):
raise ValueError(
"Secret environment should be a list."
)
envs.extend(value)
elif key == "inferred_outputs":
for v in value:
v = parse_argo_output(v, prefix="tasks")
envs.append(
{
"name": "couler.inferred_outputs.%s"
% para_index,
"value": v,
}
)
para_index += 1
elif (
isinstance(value, list)
and len(value) > 0
and isinstance(value[0], Output)
):
args.append(value[0].value)
para_name = input_parameter_name(
"run-job-para", para_index
)
parameters.append({"name": para_name})
env_value = "'{{input.parameters.%s}}'" % para_name
envs.append({"name": key, "value": env_value})
para_index = para_index + 1
else:
envs.append({"name": key, "value": env[key]})
else:
raise TypeError("Input env need to be a dict")
return envs, parameters, args |
Generate the inputs parameter for running kubernetes resource
| generate_parameters_run_job | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def convert_dict_to_env_list(d):
"""This is to convert a Python dictionary to a list, where
each list item is a dict with `name` and `value` keys.
"""
if not isinstance(d, dict):
raise TypeError("The input parameter `d` is not a dict.")
env_list = []
for k, v in d.items():
if isinstance(v, bool):
value = "'%s'" % v
env_list.append({"name": str(k), "value": value})
elif k == "secrets":
# TODO: only to add comments why "secrets" is special here
if not isinstance(v, list):
raise TypeError("The environment secrets should be a list.")
for s in v:
env_list.append(s)
else:
env_list.append({"name": str(k), "value": str(v)})
return env_list | This is to convert a Python dictionary to a list, where
each list item is a dict with `name` and `value` keys.
| convert_dict_to_env_list | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def gpu_requested(resources):
"""
Check whether the requested resources contains GPU.
Here resources is a dict like {"cpu": 1, "memory": 2,...}.
"""
if resources is None:
return False
if not isinstance(resources, dict):
raise TypeError("Parameter resources is required to be a dict")
for k, v in resources.items():
if "gpu" in k.strip().lower() and int(v) > 0:
return True
return False |
Check whether the requested resources contains GPU.
Here resources is a dict like {"cpu": 1, "memory": 2,...}.
| gpu_requested | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def bool_to_str(bool_val):
"""convert boolean to strings for YAML configuration"""
if not isinstance(bool_val, bool):
raise TypeError("The bool_val is required to be boolean type")
return "true" if bool_val else "false" | convert boolean to strings for YAML configuration | bool_to_str | python | couler-proj/couler | couler/core/utils.py | https://github.com/couler-proj/couler/blob/master/couler/core/utils.py | Apache-2.0 |
def concurrent(function_list, subtasks=False):
"""
Start different jobs at the same time
subtasks: each function F of function_list contains multiple steps.
Then, for each F, we create a sub-steps template.
"""
if not isinstance(function_list, list):
raise SyntaxError("require input functions as list")
_, con_caller_line = utils.invocation_location()
states._concurrent_func_line = con_caller_line
states._run_concurrent_lock = True
function_rets = []
for function in function_list:
# In case different parallel steps use the same function name
states._concurrent_func_id = states._concurrent_func_id + 1
if callable(function):
if subtasks is True:
# 1. generate the sub-steps template
# 2. for each step in F, update the sub_steps template
# 3. append the steps into the template
# 4. for F itself, update the main control flow step
states._sub_steps = OrderedDict()
tmp_concurrent_func_id = states._concurrent_func_id
states._run_concurrent_lock = False
ret = function()
states._concurrent_func_id = tmp_concurrent_func_id
func_name = "concurrent-task-%s" % states._concurrent_func_id
template = Steps(
name=func_name, steps=list(states._sub_steps.values())
)
states.workflow.add_template(template)
states._sub_steps = None
# TODO: add the args for the sub task
states._run_concurrent_lock = True
_update_steps(
"concurrent_func_name",
con_caller_line,
args=None,
template_name=func_name,
)
else:
ret = function()
function_rets.append(ret)
else:
raise TypeError("require loop over a function to run")
states._run_concurrent_lock = False
states._concurrent_func_id = 0
return function_rets |
Start different jobs at the same time
subtasks: each function F of function_list contains multiple steps.
Then, for each F, we create a sub-steps template.
| concurrent | python | couler-proj/couler | couler/core/syntax/concurrent.py | https://github.com/couler-proj/couler/blob/master/couler/core/syntax/concurrent.py | Apache-2.0 |
def when(condition, function):
"""Generates an Argo conditional step.
For example, the coinflip example in
https://github.com/argoproj/argo/blob/master/examples/coinflip.yaml.
"""
pre = condition["pre"]
post = condition["post"]
if pre is None or post is None:
raise SyntaxError("Output of function can not be null")
condition_suffix = condition["condition"]
pre_dict = output.extract_step_return(pre)
post_dict = output.extract_step_return(post)
if "name" in pre_dict:
left_function_id = pre_dict["id"]
if states.workflow.get_step(left_function_id) is None:
states.workflow.add_step(
name=left_function_id,
step=Step(name=left_function_id, template=pre_dict["name"]),
)
else:
# TODO: fixed if left branch is a variable rather than function
pre_dict["value"]
post_value = post_dict["value"]
if states._upstream_dag_task is not None:
step_type = "tasks"
states._when_task = pre_dict["id"]
else:
step_type = "steps"
states._when_prefix = "{{%s.%s.%s}} %s %s" % (
step_type,
pre_dict["id"],
pre_dict["output"],
condition_suffix,
post_value,
)
states._condition_id = "%s.%s" % (pre_dict["id"], pre_dict["output"])
# Enforce the function to run and lock to add into step
if callable(function):
function()
else:
raise TypeError("condition to run would be a function")
states._when_prefix = None
states._condition_id = None | Generates an Argo conditional step.
For example, the coinflip example in
https://github.com/argoproj/argo/blob/master/examples/coinflip.yaml.
| when | python | couler-proj/couler | couler/core/syntax/conditional.py | https://github.com/couler-proj/couler/blob/master/couler/core/syntax/conditional.py | Apache-2.0 |
def dag(dependency_graph):
"""
Generate a DAG of Argo YAML
Note: couler.set_dependencies() is more preferable.
https://github.com/argoproj/argo/blob/master/examples/dag-coinflip.yaml
"""
if not isinstance(dependency_graph, list):
raise SyntaxError("require input as list")
states.workflow.enable_dag_mode()
_, call_line = utils.invocation_location()
states._dag_caller_line = call_line
for edges in dependency_graph:
states._upstream_dag_task = None
if isinstance(edges, list):
for node in edges:
if callable(node):
node()
else:
raise TypeError("require loop over a function to run") |
Generate a DAG of Argo YAML
Note: couler.set_dependencies() is more preferable.
https://github.com/argoproj/argo/blob/master/examples/dag-coinflip.yaml
| dag | python | couler-proj/couler | couler/core/syntax/dag.py | https://github.com/couler-proj/couler/blob/master/couler/core/syntax/dag.py | Apache-2.0 |
def set_dependencies(step_function, dependencies=None):
"""
:param step_function: step to run
:param dependencies: the list of dependencies of this step. This can be in
either of the following forms:
1. a list of step names;
2. a string representing the enhanced depends logic that specifies
dependencies based on their statuses. See the link below for the
supported syntax:
https://github.com/argoproj/argo/blob/master/docs/enhanced-depends-logic.md
:return:
"""
if dependencies is not None:
if isinstance(dependencies, list):
# A list of dependencies
states._upstream_dag_task = dependencies
states._upstream_dag_depends_logic = None
elif isinstance(dependencies, str):
# Dependencies using enhanced depends logic
states._upstream_dag_depends_logic = dependencies
states._upstream_dag_task = None
else:
raise SyntaxError("dependencies must be a list or a string")
else:
states._upstream_dag_depends_logic = None
states._upstream_dag_task = None
if not callable(step_function):
raise SyntaxError("require step_function to a function")
states.workflow.enable_dag_mode()
states._outputs_tmp = []
if dependencies is not None and isinstance(dependencies, list):
for step in dependencies:
output = states.get_step_output(step)
for o in output:
if isinstance(o, (OutputArtifact, OutputParameter, OutputJob)):
states._outputs_tmp.append(o)
ret = step_function()
states._outputs_tmp = None
return ret |
:param step_function: step to run
:param dependencies: the list of dependencies of this step. This can be in
either of the following forms:
1. a list of step names;
2. a string representing the enhanced depends logic that specifies
dependencies based on their statuses. See the link below for the
supported syntax:
https://github.com/argoproj/argo/blob/master/docs/enhanced-depends-logic.md
:return:
| set_dependencies | python | couler-proj/couler | couler/core/syntax/dag.py | https://github.com/couler-proj/couler/blob/master/couler/core/syntax/dag.py | Apache-2.0 |
def set_exit_handler(status, exit_handler):
"""
Configure the workflow handler
Status would be: Succeeded, Failed, or Error.
Each status invokes one exit_handler function.
https://github.com/argoproj/argo/blob/master/examples/exit-handlers.yaml
"""
if not callable(exit_handler):
raise SyntaxError("require exit handler is a function")
if not isinstance(status, WFStatus): # noqa: F405
raise SyntaxError(
"require input status to be Succeeded, Failed or Error"
)
workflow_status = "{{workflow.status}} == %s" % status.value
states._exit_handler_enable = True
states._when_prefix = workflow_status
branch = exit_handler()
if branch is None:
raise SyntaxError("require function return value")
states._when_prefix = None
states._exit_handler_enable = False |
Configure the workflow handler
Status would be: Succeeded, Failed, or Error.
Each status invokes one exit_handler function.
https://github.com/argoproj/argo/blob/master/examples/exit-handlers.yaml
| set_exit_handler | python | couler-proj/couler | couler/core/syntax/exit_handler.py | https://github.com/couler-proj/couler/blob/master/couler/core/syntax/exit_handler.py | Apache-2.0 |
def exec_while(condition, inner_while):
"""
Generate the Argo recursive logic. For example
https://github.com/argoproj/argo/blob/master/examples/README.md#recursion.
"""
# _while_lock means 'exec_while' operation begins to work
# _while_steps stores logic steps inside the recursion logic
states._while_lock = True
# Enforce inner function of the while-loop to run
if callable(inner_while):
branch = inner_while()
if branch is None:
raise SyntaxError("require function return value")
else:
raise TypeError("condition to run would be a function")
branch_dict = output.extract_step_return(branch)
recursive_name = "exec-while-" + branch_dict["name"]
recursive_id = "exec-while-" + branch_dict["id"]
if states.workflow.get_template(recursive_name) is None:
template = Steps(name=recursive_name)
else:
raise SyntaxError("Recursive function can not be called twice ")
# Generate leaving point for recursive
step_out_name = "%s-%s" % (recursive_name, "exit")
pre = condition["pre"]
pre_dict = output.extract_step_return(pre)
condition_suffix = condition["condition"]
# Generate the recursive go to step
when_prefix = "{{steps.%s.%s}} %s %s" % (
branch_dict["id"],
branch_dict["output"],
condition_suffix,
pre_dict["value"],
)
step_out_template = OrderedDict(
{
"name": step_out_name,
"template": recursive_name,
"when": when_prefix,
}
)
step_out_id = utils.invocation_name(step_out_name, recursive_id)
states._while_steps[step_out_id] = [step_out_template]
# Add steps inside the recursive logic to recursive template
template.steps = list(states._while_steps.values())
# Add this recursive logic to the templates
states.workflow.add_template(template)
# Add recursive logic to global _steps
recursive_out_step = Step(name=recursive_id, template=recursive_name)
states.workflow.add_step(name=recursive_id, step=recursive_out_step)
states._while_lock = False
states._while_steps = OrderedDict() |
Generate the Argo recursive logic. For example
https://github.com/argoproj/argo/blob/master/examples/README.md#recursion.
| exec_while | python | couler-proj/couler | couler/core/syntax/recursion.py | https://github.com/couler-proj/couler/blob/master/couler/core/syntax/recursion.py | Apache-2.0 |
def _container_output(step_name, template_name, output):
"""Generate output name from an Argo container template. For example,
"{{steps.generate-parameter.outputs.parameters.hello-param}}" used in
https://github.com/argoproj/argo/tree/master/examples#output-parameters.
Each element of return for run_container is contacted by:
couler.step_name.template_name.output.parameters.output_id
"""
rets = []
if output is None:
ret = "couler.%s.%s.outputs.parameters.%s" % (
step_name,
template_name,
"1",
)
rets.append(OutputEmpty(value=ret))
return rets
output_is_parameter = True
if "parameters" in output:
_outputs = output["parameters"]
elif "artifacts" in output:
_outputs = output["artifacts"]
output_is_parameter = False
if isinstance(_outputs, str):
_outputs = [_outputs]
if isinstance(_outputs, list):
for o in _outputs:
output_id = o["name"]
is_global = "globalName" in o
if output_is_parameter:
if is_global:
ret = "couler.workflow.outputs.parameters.%s" % output_id
else:
ret = "couler.%s.%s.outputs.parameters.%s" % (
step_name,
template_name,
output_id,
)
rets.append(OutputParameter(value=ret, is_global=is_global))
else:
if is_global:
ret = "couler.workflow.outputs.artifacts.%s" % output_id
else:
ret = "couler.%s.%s.outputs.artifacts.%s" % (
step_name,
template_name,
output_id,
)
rets.append(
OutputArtifact(
value=ret,
path=o["path"],
artifact=o,
is_global=is_global,
)
)
else:
raise SyntaxError("Container output must be a list")
return rets | Generate output name from an Argo container template. For example,
"{{steps.generate-parameter.outputs.parameters.hello-param}}" used in
https://github.com/argoproj/argo/tree/master/examples#output-parameters.
Each element of return for run_container is contacted by:
couler.step_name.template_name.output.parameters.output_id
| _container_output | python | couler-proj/couler | couler/core/templates/output.py | https://github.com/couler-proj/couler/blob/master/couler/core/templates/output.py | Apache-2.0 |
def _script_output(step_name, template_name, output):
"""Generate output name from an Argo script template. For example,
"{{steps.generate.outputs.result}}" in
https://github.com/argoproj/argo/tree/master/examples#scripts--results
Return of run_script is contacted by:
couler.step_name.template_name.outputs.result
"""
value = "couler.%s.%s.outputs.result" % (step_name, template_name)
output_script = OutputScript(value=value)
if output is None:
return [output_script]
rets = _container_output(step_name, template_name, output)
rets.append(output_script)
return rets | Generate output name from an Argo script template. For example,
"{{steps.generate.outputs.result}}" in
https://github.com/argoproj/argo/tree/master/examples#scripts--results
Return of run_script is contacted by:
couler.step_name.template_name.outputs.result
| _script_output | python | couler-proj/couler | couler/core/templates/output.py | https://github.com/couler-proj/couler/blob/master/couler/core/templates/output.py | Apache-2.0 |
def _job_output(step_name, template_name):
"""
:param step_name:
:param template_name:
https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml#L44
Return the job name and job id for running a job
"""
job_name = "couler.%s.%s.outputs.parameters.job-name" % (
step_name,
template_name,
)
job_id = "couler.%s.%s.outputs.parameters.job-id" % (
step_name,
template_name,
)
job_obj = "couler.%s.%s.outputs.parameters.job-obj" % (
step_name,
template_name,
)
return [
OutputJob(
value=job_name, job_name=job_name, job_obj=job_obj, job_id=job_id
)
] |
:param step_name:
:param template_name:
https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml#L44
Return the job name and job id for running a job
| _job_output | python | couler-proj/couler | couler/core/templates/output.py | https://github.com/couler-proj/couler/blob/master/couler/core/templates/output.py | Apache-2.0 |
def extract_step_return(step_output):
"""Extract information for run container or script output.
step_output is a list with multiple outputs
"""
ret = {}
if isinstance(step_output, list):
# The first element of outputs is used for control flow operation
step_output = step_output[0]
# In case user input a normal variable
if not isinstance(step_output, Output):
ret["value"] = step_output
return ret
else:
tmp = step_output.value.split(".")
if len(tmp) < 4:
raise ValueError("Incorrect step return representation")
step_name = tmp[1]
template_name = tmp[2]
# To avoid duplicate map function
output = tmp[3]
for item in tmp[4:]:
output = output + "." + item
ret = {"name": template_name, "id": step_name, "output": output}
return ret
else:
ret["value"] = step_output
return ret | Extract information for run container or script output.
step_output is a list with multiple outputs
| extract_step_return | python | couler-proj/couler | couler/core/templates/output.py | https://github.com/couler-proj/couler/blob/master/couler/core/templates/output.py | Apache-2.0 |
def to_yaml(self):
"""Covert the secret to a secret CRD specification."""
d = OrderedDict(
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {"name": self.name, "namespace": self.namespace} if self.namespace != "default" else {"name": self.name},
"type": "Opaque",
"data": {},
}
)
for k, v in self.data.items():
d["data"][k] = v
return d | Covert the secret to a secret CRD specification. | to_yaml | python | couler-proj/couler | couler/core/templates/secret.py | https://github.com/couler-proj/couler/blob/master/couler/core/templates/secret.py | Apache-2.0 |
def to_env_list(self):
"""
Convert the secret to an environment list, and can be attached to
containers.
"""
secret_envs = []
for key, _ in self.data.items():
secret_env = {
"name": key,
"valueFrom": {"secretKeyRef": {"name": self.name, "key": key}},
}
secret_envs.append(secret_env)
return secret_envs |
Convert the secret to an environment list, and can be attached to
containers.
| to_env_list | python | couler-proj/couler | couler/core/templates/secret.py | https://github.com/couler-proj/couler/blob/master/couler/core/templates/secret.py | Apache-2.0 |
def run(
raw_template,
tuning_params,
objective,
success_condition=None,
failure_condition=None,
algorithm="random",
parallel_trial_count=3,
max_trial_count=12,
max_failed_trial_count=3,
):
"""
Args:
tuning_params: A dictionary of hyperparameters to be tuned.
objective: The dictionary of objective for model training.
success_condition: The condition to indicate when a Katib
experiment succeeds.
failure_condition: The condition to indicate when a Katib
experiment fails.
algorithm: The algorithm used in model training.
raw_template: The YAML string for containing Katib trial manifest.
parallel_trial_count: The number of parallel Katib trials.
max_trial_count: The total number of Katib trials.
max_failed_trial_count: The total number of failed Katib trials.
"""
_validate_objective(objective)
_validate_tuning_params(tuning_params)
manifest = katib_manifest_template.format(
parallel_trial_count=parallel_trial_count,
max_trial_count=max_trial_count,
max_failed_trial_count=max_failed_trial_count,
obj_type=objective["type"],
obj_goal=objective["goal"],
obj_metric_name=objective["metric_name"],
algorithm=algorithm,
raw_template=raw_template,
)
wf_yaml = yaml.load(StringIO(manifest), Loader=yaml.FullLoader)
parameters = []
for i in range(0, len(tuning_params)):
param = {
"name": "--%s" % tuning_params[i]["name"],
"parameterType": tuning_params[i]["type"],
"feasibleSpace": {
"min": '"%d"' % tuning_params[i]["range"][0],
"max": '"%d"' % tuning_params[i]["range"][1],
},
}
parameters.append(param)
wf_yaml["spec"]["parameters"] = parameters
manifest = yaml.dump(wf_yaml, default_flow_style=False)
couler.run_job(
manifest=manifest,
success_condition=success_condition,
failure_condition=failure_condition,
) |
Args:
tuning_params: A dictionary of hyperparameters to be tuned.
objective: The dictionary of objective for model training.
success_condition: The condition to indicate when a Katib
experiment succeeds.
failure_condition: The condition to indicate when a Katib
experiment fails.
algorithm: The algorithm used in model training.
raw_template: The YAML string for containing Katib trial manifest.
parallel_trial_count: The number of parallel Katib trials.
max_trial_count: The total number of Katib trials.
max_failed_trial_count: The total number of failed Katib trials.
| run | python | couler-proj/couler | couler/steps/katib.py | https://github.com/couler-proj/couler/blob/master/couler/steps/katib.py | Apache-2.0 |
def test_workflow_security_context(self):
"""
The securityContext configuration mostly taken from
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
self.assertIsNone(couler.workflow.security_context)
flip_coin()
self.assertNotIn("securityContext", couler.workflow_yaml()["spec"])
couler.states.workflow.set_security_context(
security_context={
"fsGroup": 2000,
"runAsNonRoot": True,
"runAsUser": 1000,
"fsGroupChangePolicy": "OnRootMismatch",
"runAsGroup": 3000,
"supplementalGroups": [1000, 4000, 4500],
"seLinuxOptions": {"level": "s0:c123,c456"},
"seccompProfile": {
"localhostProfile": "my-profiles/profile-allow.json",
"type": "Localhost",
},
"sysctls": [
{"name": "kernel.shm_rmid_forced", "value": "0"},
{"name": "net.core.somaxconn", "value": "1024"},
],
}
)
self.assertTrue(couler.workflow.security_context is not None)
actual_wf = couler.workflow_yaml()
self.assertIn("securityContext", actual_wf["spec"])
actual_security_context = actual_wf["spec"]["securityContext"]
self.assertEqual(actual_security_context["fsGroup"], 2000)
self.assertEqual(actual_security_context["runAsNonRoot"], True)
self.assertEqual(actual_security_context["runAsUser"], 1000)
self.assertEqual(
actual_security_context["fsGroupChangePolicy"], "OnRootMismatch"
)
self.assertEqual(actual_security_context["runAsGroup"], 3000)
self.assertEqual(
actual_security_context["supplementalGroups"], [1000, 4000, 4500]
)
self.assertEqual(
actual_security_context["seLinuxOptions"]["level"], "s0:c123,c456"
)
self.assertEqual(
actual_security_context["seccompProfile"]["localhostProfile"],
"my-profiles/profile-allow.json",
)
self.assertEqual(
actual_security_context["seccompProfile"]["type"], "Localhost"
)
self.assertEqual(
actual_security_context["sysctls"][0]["name"],
"kernel.shm_rmid_forced",
)
self.assertEqual(actual_security_context["sysctls"][0]["value"], "0")
self.assertEqual(
actual_security_context["sysctls"][1]["name"], "net.core.somaxconn"
)
self.assertEqual(
actual_security_context["sysctls"][1]["value"], "1024"
)
couler._cleanup()
self.assertFalse(couler.workflow.security_context is not None) |
The securityContext configuration mostly taken from
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
| test_workflow_security_context | python | couler-proj/couler | couler/tests/workflow_basic_test.py | https://github.com/couler-proj/couler/blob/master/couler/tests/workflow_basic_test.py | Apache-2.0 |
def random_code() -> None:
"""
Randomly generate a 'success' or 'fail'
to let sys.exit emulate a task final state
"""
import random
task = ['success', 'fail']
res = random.randint(0, 1)
res = task[res]
print(f'{res}')
if res == 'fail':
sys.exit(2) |
Randomly generate a 'success' or 'fail'
to let sys.exit emulate a task final state
| random_code | python | couler-proj/couler | examples/depends.py | https://github.com/couler-proj/couler/blob/master/examples/depends.py | Apache-2.0 |
def __init__(
self,
data_url,
text_features=None,
label_column=None,
data_dir="",
citation="",
url="",
label_classes=None,
process_label=lambda x: x,
**kwargs,
):
"""BuilderConfig for CLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`datasets.Value('float32')`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
"""
super(ClueConfig, self).__init__(
version=datasets.Version("1.0.0", ""), **kwargs
)
self.text_features = text_features
self.label_column = label_column
self.label_classes = label_classes
self.data_url = data_url
self.data_dir = data_dir
self.citation = citation
self.url = url
self.process_label = process_label | BuilderConfig for CLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`datasets.Value('float32')`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
| __init__ | python | JunnYu/RoFormer_pytorch | examples/clue/classification/clue_10.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/examples/clue/classification/clue_10.py | Apache-2.0 |
def __init__(
self,
data_url,
text_features=None,
label_column=None,
data_dir="",
citation="",
url="",
label_classes=None,
process_label=lambda x: x,
**kwargs,
):
"""BuilderConfig for CLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`datasets.Value('float32')`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
"""
super(ClueConfig, self).__init__(
version=datasets.Version("1.0.0", ""), **kwargs
)
self.text_features = text_features
self.label_column = label_column
self.label_classes = label_classes
self.data_url = data_url
self.data_dir = data_dir
self.citation = citation
self.url = url
self.process_label = process_label | BuilderConfig for CLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`datasets.Value('float32')`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
| __init__ | python | JunnYu/RoFormer_pytorch | examples/clue/classification/clue_11.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/examples/clue/classification/clue_11.py | Apache-2.0 |
def create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[1]
text_b = None
label = line[0]
examples.append(
InputExample(guid=guid, texts=[text_a, text_b], label=label)
)
return examples | Creates examples for the training and dev sets. | create_examples | python | JunnYu/RoFormer_pytorch | examples/dummpy/task_text_classification_chnsenti.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/examples/dummpy/task_text_classification_chnsenti.py | Apache-2.0 |
def _init_weight(out: nn.Parameter):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
]
)
out.requires_grad = False # set early to avoid an error in pytorch-1.8+
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
return out |
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
| _init_weight | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(self, seq_len: int, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
positions = torch.arange(
past_key_values_length,
past_key_values_length + seq_len,
dtype=torch.long,
device=self.weight.device,
)
return super().forward(positions) | `input_ids_shape` is expected to be [bsz x seqlen]. | forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def load_tf_weights_in_roformer(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name.replace("bert", "roformer"))
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n
in [
"adam_v",
"adam_m",
"AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1",
"global_step",
]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if not pointer.shape == array.shape:
raise ValueError(
f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
)
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model | Load tf checkpoints in a pytorch model. | load_tf_weights_in_roformer | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device, past_key_values_length
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
if hasattr(self, "embeddings_project"):
embedding_output = self.embeddings_project(embedding_output)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.add_pooling_layer else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
) |
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
| forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
)
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return (
((masked_lm_loss,) + output) if masked_lm_loss is not None else output
)
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
| forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (
labels.dtype == torch.long or labels.dtype == torch.int
):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
| forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
num_choices = (
input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
)
input_ids = (
input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
)
attention_mask = (
attention_mask.view(-1, attention_mask.size(-1))
if attention_mask is not None
else None
)
token_type_ids = (
token_type_ids.view(-1, token_type_ids.size(-1))
if token_type_ids is not None
else None
)
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
pooled_output = self.sequence_summary(sequence_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
| forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss,
labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(labels),
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
| forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
| forward | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_roformer.py | Apache-2.0 |
def build(self, input_shape: tf.TensorShape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
weight = self._init_weight(self.num_positions, self.embedding_dim)
self.weight = self.add_weight(
name="embeddings",
shape=[self.num_positions, self.embedding_dim],
)
weight = tf.cast(weight, dtype=self.weight.dtype)
self.weight.assign(weight)
super().build(input_shape) |
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
| build | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def _init_weight(n_pos: int, dim: int):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
position_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
]
)
table = np.zeros_like(position_enc)
# index 0 is all zero
table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
# convert to tensor
table = tf.convert_to_tensor(table)
tf.stop_gradient(table)
return table |
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
| _init_weight | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(
past_key_values_length,
seq_len + past_key_values_length,
delta=1,
name="range",
)
return tf.gather(self.weight, positions) | Input is expected to be of size [bsz x seqlen]. | call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
token_type_embeds = tf.gather(
params=self.token_type_embeddings, indices=token_type_ids
)
final_embeddings = self.embeddings_sum(
inputs=[inputs_embeds, token_type_embeds]
)
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings |
Applies embedding based on inputs tensor.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.mlm(
sequence_output=sequence_output, training=inputs["training"]
)
loss = (
None
if inputs["labels"] is None
else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.mlm(sequence_output=sequence_output, training=inputs["training"])
loss = None
if inputs["labels"] is not None:
# shift labels to the left and cut last logit token
logits = logits[:, :-1]
labels = inputs["labels"][:, 1:]
loss = self.compute_loss(labels=labels, logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(hidden_states=outputs[0], training=inputs["training"])
loss = (
None
if inputs["labels"] is None
else self.compute_loss(labels=inputs["labels"], logits=logits)
)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = (
tf.reshape(tensor=inputs["input_ids"], shape=(-1, seq_length))
if inputs["input_ids"] is not None
else None
)
flat_attention_mask = (
tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
if inputs["attention_mask"] is not None
else None
)
flat_token_type_ids = (
tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
if inputs["token_type_ids"] is not None
else None
)
flat_inputs_embeds = (
tf.reshape(
tensor=inputs["inputs_embeds"],
shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3]),
)
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.roformer(
input_ids=flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids,
head_mask=inputs["head_mask"],
inputs_embeds=flat_inputs_embeds,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(inputs=outputs[0], training=inputs["training"])
logits = self.classifier(inputs=logits)
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
loss = (
None
if inputs["labels"] is None
else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(
inputs=sequence_output, training=inputs["training"]
)
logits = self.classifier(inputs=sequence_output)
loss = (
None
if inputs["labels"] is None
else self.compute_loss(labels=inputs["labels"], logits=logits)
)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(inputs=sequence_output)
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
start_logits = tf.squeeze(input=start_logits, axis=-1)
end_logits = tf.squeeze(input=end_logits, axis=-1)
loss = None
if (
inputs["start_positions"] is not None
and inputs["end_positions"] is not None
):
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) |
start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
| call | python | JunnYu/RoFormer_pytorch | src/roformer/modeling_tf_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/modeling_tf_roformer.py | Apache-2.0 |
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoFormer sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep |
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoFormer sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
| build_inputs_with_special_tokens | python | JunnYu/RoFormer_pytorch | src/roformer/tokenization_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/tokenization_roformer.py | Apache-2.0 |
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] |
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
| get_special_tokens_mask | python | JunnYu/RoFormer_pytorch | src/roformer/tokenization_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/tokenization_roformer.py | Apache-2.0 |
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] |
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
| create_token_type_ids_from_sequences | python | JunnYu/RoFormer_pytorch | src/roformer/tokenization_roformer.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/tokenization_roformer.py | Apache-2.0 |
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoFormer sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output |
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoFormer sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
| build_inputs_with_special_tokens | python | JunnYu/RoFormer_pytorch | src/roformer/tokenization_roformer_fast.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/tokenization_roformer_fast.py | Apache-2.0 |
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] |
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RoFormer
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
| create_token_type_ids_from_sequences | python | JunnYu/RoFormer_pytorch | src/roformer/tokenization_roformer_fast.py | https://github.com/JunnYu/RoFormer_pytorch/blob/master/src/roformer/tokenization_roformer_fast.py | Apache-2.0 |
def parse_arguments():
"""Parse and return the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--result_dir',
help="Directory where the results are saved.")
parser.add_argument('--checkpoint_dir',
help="Directory that contains checkpoints.")
parser.add_argument('--params', '--params_file', '--params_file_path',
help="Path to the file that defines the "
"hyperparameters.")
parser.add_argument('--config', help="Path to the configuration file.")
parser.add_argument('--runs', type=int, default="1",
help="Times to run the inference process.")
parser.add_argument('--rows', type=int, default=5,
help="Number of images per row to be generated.")
parser.add_argument('--columns', type=int, default=5,
help="Number of images per column to be generated.")
parser.add_argument('--lower', type=float, default=-2,
help="Lower bound of the truncated normal random "
"variables.")
parser.add_argument('--upper', type=float, default=2,
help="Upper bound of the truncated normal random "
"variables.")
parser.add_argument('--gpu', '--gpu_device_num', type=str, default="0",
help="The GPU device number to use.")
args = parser.parse_args()
return args | Parse and return the command line arguments. | parse_arguments | python | salu133445/musegan | src/inference.py | https://github.com/salu133445/musegan/blob/master/src/inference.py | MIT |
def setup():
"""Parse command line arguments, load model parameters, load configurations
and setup environment."""
# Parse the command line arguments
args = parse_arguments()
# Load parameters
params = load_yaml(args.params)
# Load training configurations
config = load_yaml(args.config)
update_not_none(config, vars(args))
# Set unspecified schedule steps to default values
for target in (config['learning_rate_schedule'], config['slope_schedule']):
if target['start'] is None:
target['start'] = 0
if target['end'] is None:
target['end'] = config['steps']
# Make sure result directory exists
make_sure_path_exists(config['result_dir'])
# Setup GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu']
return params, config | Parse command line arguments, load model parameters, load configurations
and setup environment. | setup | python | salu133445/musegan | src/inference.py | https://github.com/salu133445/musegan/blob/master/src/inference.py | MIT |
def parse_arguments():
"""Parse and return the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--result_dir',
help="Directory where the results are saved.")
parser.add_argument('--checkpoint_dir',
help="Directory that contains checkpoints.")
parser.add_argument('--params', '--params_file', '--params_file_path',
help="Path to the file that defines the "
"hyperparameters.")
parser.add_argument('--config', help="Path to the configuration file.")
parser.add_argument('--mode', default='lerp', choices=('lerp', 'slerp'),
help="Interpolation mode.")
parser.add_argument('--runs', type=int, default="1",
help="Times to run the inference process.")
parser.add_argument('--rows', type=int, default=5,
help="Number of images per row to be generated.")
parser.add_argument('--columns', type=int, default=5,
help="Number of images per column to be generated.")
parser.add_argument('--lower', type=float, default=-2,
help="Lower bound of the truncated normal random "
"variables.")
parser.add_argument('--upper', type=float, default=2,
help="Upper bound of the truncated normal random "
"variables.")
parser.add_argument('--gpu', '--gpu_device_num', type=str, default="0",
help="The GPU device number to use.")
args = parser.parse_args()
return args | Parse and return the command line arguments. | parse_arguments | python | salu133445/musegan | src/interpolation.py | https://github.com/salu133445/musegan/blob/master/src/interpolation.py | MIT |
def setup():
"""Parse command line arguments, load model parameters, load configurations
and setup environment."""
# Parse the command line arguments
args = parse_arguments()
# Load parameters
params = load_yaml(args.params)
# Load training configurations
config = load_yaml(args.config)
update_not_none(config, vars(args))
# Set unspecified schedule steps to default values
for target in (config['learning_rate_schedule'], config['slope_schedule']):
if target['start'] is None:
target['start'] = 0
if target['end'] is None:
target['end'] = config['steps']
# Make sure result directory exists
make_sure_path_exists(config['result_dir'])
# Setup GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu']
return params, config | Parse command line arguments, load model parameters, load configurations
and setup environment. | setup | python | salu133445/musegan | src/interpolation.py | https://github.com/salu133445/musegan/blob/master/src/interpolation.py | MIT |
def slerp(a, b, t):
"""Return the spherical linear interpolation of point `a` and `b` at
specific ratio `t`."""
omega = np.arccos(np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b)))
so = np.sin(omega)
return np.sin((1 - t) * omega) / so * a + np.sin(t * omega) / so * b | Return the spherical linear interpolation of point `a` and `b` at
specific ratio `t`. | slerp | python | salu133445/musegan | src/interpolation.py | https://github.com/salu133445/musegan/blob/master/src/interpolation.py | MIT |
def lerp(a, b1, b2, t1, t2):
"""Return the 2D linear interpolation of point `a`, `b1` and `b2` at
specific ratio `t1` and `t2`."""
inter1 = a * (1 - t1) + b1 * t1
inter2 = b2 * (1 - t1) + (b2 + (b1 - a)) * t1
return inter1 * (1 - t2) + t2 * inter2 | Return the 2D linear interpolation of point `a`, `b1` and `b2` at
specific ratio `t1` and `t2`. | lerp | python | salu133445/musegan | src/interpolation.py | https://github.com/salu133445/musegan/blob/master/src/interpolation.py | MIT |
def get_input_z(config, params):
"""Return input latent code to the generator."""
if ((config['mode'] == 'slerp') and (config['rows'] > 1)
and (config['columns'] > 1)):
raise ValueError("Only supports 1D interpolation in 'slerp' mode.")
a = scipy.stats.truncnorm.rvs(
config['lower'], config['upper'], size=params['latent_dim'])
b = scipy.stats.truncnorm.rvs(
config['lower'], config['upper'], size=params['latent_dim'])
# Slerp
if config['mode'] == 'slerp':
return np.array([slerp(a, b, t) for t in np.linspace(
0, 1, (config['rows'] * config['columns']))])
# Lerp
b2 = scipy.stats.truncnorm.rvs(
config['lower'], config['upper'], size=params['latent_dim'])
x = np.linspace(0, 1, config['rows'])
y = np.linspace(0, 1, config['columns'])
xv, yv = np.meshgrid(x, y)
xv = xv.flatten()
yv = yv.flatten()
return np.array([lerp(a, b, b2, t1, t2) for t1, t2 in zip(xv, yv)]) | Return input latent code to the generator. | get_input_z | python | salu133445/musegan | src/interpolation.py | https://github.com/salu133445/musegan/blob/master/src/interpolation.py | MIT |
def parse_arguments():
"""Parse and return the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("filepath", help="Path to the data file.")
parser.add_argument(
"--name",
help="File name to save in SharedArray. Defaults to the original file name.",
)
parser.add_argument(
"--prefix",
help="Prefix to the file name to save in SharedArray. Only effective when "
"`name` is not given.",
)
parser.add_argument(
"--dtype", default="bool", help="Datatype of the array. Defaults to bool."
)
args = parser.parse_args()
return args.filepath, args.name, args.prefix, args.dtype | Parse and return the command line arguments. | parse_arguments | python | salu133445/musegan | src/process_data.py | https://github.com/salu133445/musegan/blob/master/src/process_data.py | MIT |
def create_shared_array(name, shape, dtype):
"""Create shared array. Prompt if a file with the same name existed."""
try:
return sa.create(name, shape, dtype)
except FileExistsError:
response = ""
while response.lower() not in ["y", "n", "yes", "no"]:
response = input(
"Existing array (also named " + name + ") was found. Replace it? (y/n) "
)
if response.lower() in ("n", "no"):
sys.exit(0)
sa.delete(name)
return sa.create(name, shape, dtype) | Create shared array. Prompt if a file with the same name existed. | create_shared_array | python | salu133445/musegan | src/process_data.py | https://github.com/salu133445/musegan/blob/master/src/process_data.py | MIT |
def main():
"""Load and save an array to shared memory."""
filepath, name, prefix, dtype = parse_arguments()
if name is None:
name = os.path.splitext(os.path.basename(filepath))[0]
if prefix is not None:
name = prefix + "_" + name
print("Loading data from '{}'.".format(filepath))
if filepath.endswith(".npy"):
data = np.load(filepath)
data = data.astype(dtype)
sa_array = create_shared_array(name, data.shape, data.dtype)
print("Saving data to shared memory...")
np.copyto(sa_array, data)
else:
with np.load(filepath) as loaded:
sa_array = create_shared_array(name, loaded["shape"], dtype)
print("Saving data to shared memory...")
sa_array[[x for x in loaded["nonzero"]]] = 1
print(
"Successfully saved: (name='{}', shape={}, dtype={})".format(
name, sa_array.shape, sa_array.dtype
)
) | Load and save an array to shared memory. | main | python | salu133445/musegan | src/process_data.py | https://github.com/salu133445/musegan/blob/master/src/process_data.py | MIT |
def parse_arguments():
"""Parse and return the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--exp_dir', help="Directory to save all the results.")
parser.add_argument('--params', help="Path to the model parameter file.")
parser.add_argument('--config', help="Path to the configuration file.")
parser.add_argument('--gpu', '--gpu_device_num', type=str, default="0",
help="The GPU device number to use.")
parser.add_argument('--n_jobs', type=int,
help="Number of parallel calls to use for input "
"pipeline. Set to 1 to disable multiprocessing.")
args = parser.parse_args()
return args | Parse and return the command line arguments. | parse_arguments | python | salu133445/musegan | src/train.py | https://github.com/salu133445/musegan/blob/master/src/train.py | MIT |
def setup_dirs(config):
"""Setup an experiment directory structure and update the `params`
dictionary with the directory paths."""
# Get experiment directory structure
config['exp_dir'] = os.path.realpath(config['exp_dir'])
config['src_dir'] = os.path.join(config['exp_dir'], 'src')
config['eval_dir'] = os.path.join(config['exp_dir'], 'eval')
config['model_dir'] = os.path.join(config['exp_dir'], 'model')
config['sample_dir'] = os.path.join(config['exp_dir'], 'samples')
config['log_dir'] = os.path.join(config['exp_dir'], 'logs', 'train')
# Make sure directories exist
for key in ('log_dir', 'model_dir', 'sample_dir', 'src_dir'):
make_sure_path_exists(config[key]) | Setup an experiment directory structure and update the `params`
dictionary with the directory paths. | setup_dirs | python | salu133445/musegan | src/train.py | https://github.com/salu133445/musegan/blob/master/src/train.py | MIT |
def setup():
"""Parse command line arguments, load model parameters, load configurations,
setup environment and setup loggers."""
# Parse the command line arguments
args = parse_arguments()
# Load parameters
params = load_yaml(args.params)
if params.get('is_accompaniment') and params.get('condition_track_idx') is None:
raise TypeError("`condition_track_idx` cannot be None type in "
"accompaniment mode.")
# Load configurations
config = load_yaml(args.config)
update_not_none(config, vars(args))
# Set unspecified schedule steps to default values
for target in (config['learning_rate_schedule'], config['slope_schedule']):
if target['start'] is None:
target['start'] = 0
if target['end'] is None:
target['end'] = config['steps']
# Setup experiment directories and update them to configuration dictionary
setup_dirs(config)
# Setup loggers
del logging.getLogger('tensorflow').handlers[0]
setup_loggers(config['log_dir'])
# Setup GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu']
# Backup source code
backup_src(config['src_dir'])
return params, config | Parse command line arguments, load model parameters, load configurations,
setup environment and setup loggers. | setup | python | salu133445/musegan | src/train.py | https://github.com/salu133445/musegan/blob/master/src/train.py | MIT |
def load_training_data(params, config):
"""Load and return the training data."""
# Load data
if params['is_conditional']:
raise ValueError("Not supported yet.")
else:
labels = None
LOGGER.info("Loading training data.")
data = load_data(config['data_source'], config['data_filename'])
LOGGER.info("Training data size: %d", len(data))
# Build dataset
LOGGER.info("Building dataset.")
dataset = get_dataset(
data, labels, config['batch_size'], params['data_shape'],
config['use_random_transpose'], config['n_jobs'])
# Create iterator
if params['is_conditional']:
train_x, train_y = dataset.make_one_shot_iterator().get_next()
else:
train_x, train_y = dataset.make_one_shot_iterator().get_next(), None
return train_x, train_y | Load and return the training data. | load_training_data | python | salu133445/musegan | src/train.py | https://github.com/salu133445/musegan/blob/master/src/train.py | MIT |
def load_or_create_samples(params, config):
"""Load or create the samples used as the sampler inputs."""
# Load sample_z
LOGGER.info("Loading sample_z.")
sample_z_path = os.path.join(config['model_dir'], 'sample_z.npy')
if os.path.exists(sample_z_path):
sample_z = np.load(sample_z_path)
if sample_z.shape[1] != params['latent_dim']:
LOGGER.info("Loaded sample_z has wrong shape")
resample = True
else:
resample = False
else:
LOGGER.info("File for sample_z not found")
resample = True
# Draw new sample_z
if resample:
LOGGER.info("Drawing new sample_z.")
sample_z = scipy.stats.truncnorm.rvs(
-2, 2, size=(np.prod(config['sample_grid']), params['latent_dim']))
make_sure_path_exists(config['model_dir'])
np.save(sample_z_path, sample_z)
if params.get('is_accompaniment'):
# Load sample_x
LOGGER.info("Loading sample_x.")
sample_x_path = os.path.join(config['model_dir'], 'sample_x.npy')
if os.path.exists(sample_x_path):
sample_x = np.load(sample_x_path)
if sample_x.shape[1:] != params['data_shape']:
LOGGER.info("Loaded sample_x has wrong shape")
resample = True
else:
resample = False
else:
LOGGER.info("File for sample_x not found")
resample = True
# Draw new sample_x
if resample:
LOGGER.info("Drawing new sample_x.")
data = load_data(config['data_source'], config['data_filename'])
sample_x = get_samples(
np.prod(config['sample_grid']), data,
use_random_transpose = config['use_random_transpose'])
make_sure_path_exists(config['model_dir'])
np.save(sample_x_path, sample_x)
else:
sample_x = None
return sample_x, None, sample_z | Load or create the samples used as the sampler inputs. | load_or_create_samples | python | salu133445/musegan | src/train.py | https://github.com/salu133445/musegan/blob/master/src/train.py | MIT |
def get_n_params(var_list):
"""Return the number of variables in a variable list."""
return int(np.sum([np.product(
[x.value for x in var.get_shape()]) for var in var_list])) | Return the number of variables in a variable list. | get_n_params | python | salu133445/musegan | src/train.py | https://github.com/salu133445/musegan/blob/master/src/train.py | MIT |
def load_data_from_npz(filename):
"""Load and return the training data from a npz file (sparse format)."""
with np.load(filename) as f:
data = np.zeros(f['shape'], np.bool_)
data[[x for x in f['nonzero']]] = True
return data | Load and return the training data from a npz file (sparse format). | load_data_from_npz | python | salu133445/musegan | src/musegan/data.py | https://github.com/salu133445/musegan/blob/master/src/musegan/data.py | MIT |
def load_data(data_source, data_filename):
"""Load and return the training data."""
if data_source == 'sa':
import SharedArray as sa
return sa.attach(data_filename)
if data_source == 'npy':
return load_data_from_npy(data_filename)
if data_source == 'npz':
return load_data_from_npz(data_filename)
raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
"But get " + str(data_source)) | Load and return the training data. | load_data | python | salu133445/musegan | src/musegan/data.py | https://github.com/salu133445/musegan/blob/master/src/musegan/data.py | MIT |
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
"""Return some random samples of the training data."""
indices = np.random.choice(len(data), n_samples, False)
if np.issubdtype(data.dtype, np.bool_):
sample_data = data[indices] * 2. - 1.
else:
sample_data = data[indices]
if use_random_transpose:
sample_data = np.array([random_transpose(x) for x in sample_data])
if labels is None:
return sample_data
return sample_data, labels[indices] | Return some random samples of the training data. | get_samples | python | salu133445/musegan | src/musegan/data.py | https://github.com/salu133445/musegan/blob/master/src/musegan/data.py | MIT |
def get_dataset(data, labels=None, batch_size=None, data_shape=None,
use_random_transpose=False, num_threads=1):
"""Create and return a tensorflow dataset from an array."""
if labels is None:
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data), tf.float32)
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll: tf.py_func(
random_transpose, [pianoroll], tf.float32),
num_parallel_calls=num_threads)
dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
pianoroll, data_shape), num_parallel_calls=num_threads)
else:
assert len(data) == len(labels), (
"Lengths of `data` and `lables` do not match.")
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data, labels), [tf.float32, tf.int32])
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll, label: (
tf.py_func(random_transpose, [pianoroll], tf.float32),
label),
num_parallel_calls=num_threads)
dataset = dataset.map(
lambda pianoroll, label: (set_pianoroll_shape(
pianoroll, data_shape), set_label_shape(label)),
num_parallel_calls=num_threads)
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
return dataset.prefetch(PREFETCH_SIZE) | Create and return a tensorflow dataset from an array. | get_dataset | python | salu133445/musegan | src/musegan/data.py | https://github.com/salu133445/musegan/blob/master/src/musegan/data.py | MIT |
def vector_to_image(array, inverted=True):
"""
Convert a batched vector array to an image array.
Arguments
---------
array : `np.array`, ndim=2
The vector array.
Returns
-------
image : `np.array`, ndim=4
The image array.
"""
if array.ndim != 2:
raise ValueError("Input array must have 2 dimensions.")
# Scale the array to [-1, 1] (assume that the values range in [-2, 2])
array = .25 * (array + 2.)
# Invert the color
if inverted:
array = 1. - array
# Minus a small value to avoid casting 256 to 0
quantized = (array * 256 - 1e-5).astype(np.uint8)
# Reshape to an image array
image = np.reshape(quantized, (-1, quantized.shape[1], 1, 1))
return image |
Convert a batched vector array to an image array.
Arguments
---------
array : `np.array`, ndim=2
The vector array.
Returns
-------
image : `np.array`, ndim=4
The image array.
| vector_to_image | python | salu133445/musegan | src/musegan/io_utils.py | https://github.com/salu133445/musegan/blob/master/src/musegan/io_utils.py | MIT |
def pianoroll_to_image(pianoroll, colormap=None, inverted=True,
boundary_width=1, boundary_color=0, frame=False,
gamma=1.):
"""
Convert a batched pianoroll array to an image array.
Arguments
---------
pianoroll : `np.array`, ndim=5
The pianoroll array. The shape is (n_pianorolls, n_bars, n_timestep,
n_pitches, n_tracks).
boundary_width : int
Linewidth of the boundary lines. Default to 0.
boundary_color : int
Grayscale of the boundary lines. Valid values are 0 (black) to 255
(white). Default to 0.
frame : bool
Whether to use a grid frame. Default to False.
Returns
-------
image : `np.array`, ndim=4
The image array.
"""
if pianoroll.ndim != 5:
raise ValueError("Input pianoroll array must have 5 dimensions.")
# Flip the pitch axis
pianoroll = np.flip(pianoroll, 3)
# Apply the color
if colormap is not None:
pianoroll = np.matmul(1. - colormap, np.expand_dims(pianoroll, -1))
pianoroll = pianoroll.squeeze(-1).clip(0., 1.)
# Apply gamma correction
if gamma != 1.:
pianoroll = pianoroll ** gamma
# Invert the color
if inverted:
pianoroll = 1. - pianoroll
# Quantize the image (minus a small value to avoid casting 256 to 0)
quantized = (pianoroll * 256 - 1e-5).astype(np.uint8)
# Add the boundary lines
if boundary_width:
quantized = np.pad(
quantized,
((0, 0), (0, 0), (boundary_width, 0), (boundary_width, 0), (0, 0)),
'constant', constant_values=boundary_color)
# Transpose and reshape to get the image array
if colormap is None:
transposed = np.transpose(quantized, (0, 4, 3, 1, 2))
image = np.reshape(
transposed, (-1, transposed.shape[1] * transposed.shape[2],
transposed.shape[3] * transposed.shape[4], 1))
else:
transposed = np.transpose(quantized, (0, 3, 1, 2, 4))
image = np.reshape(transposed, (
-1, transposed.shape[1], transposed.shape[2] * transposed.shape[3],
transposed.shape[4]))
# Deal with the frame
if boundary_width:
if frame:
image = np.pad(
image,
((0, 0), (0, boundary_width), (0, boundary_width), (0, 0)),
'constant', constant_values=boundary_color)
else:
image = image[:, boundary_width:, boundary_width:]
return image |
Convert a batched pianoroll array to an image array.
Arguments
---------
pianoroll : `np.array`, ndim=5
The pianoroll array. The shape is (n_pianorolls, n_bars, n_timestep,
n_pitches, n_tracks).
boundary_width : int
Linewidth of the boundary lines. Default to 0.
boundary_color : int
Grayscale of the boundary lines. Valid values are 0 (black) to 255
(white). Default to 0.
frame : bool
Whether to use a grid frame. Default to False.
Returns
-------
image : `np.array`, ndim=4
The image array.
| pianoroll_to_image | python | salu133445/musegan | src/musegan/io_utils.py | https://github.com/salu133445/musegan/blob/master/src/musegan/io_utils.py | MIT |
def image_pair(image1, image2, mode='side-by-side', boundary_width=1,
boundary_color=0, frame=False):
"""
Pair two image arrays to one single image array.
Arguments
---------
image1 : `np.array`, ndim=4
The image array at the left in 'side-by-side' mode or at the top in
'top-bottom' mode.
image2 : `np.array`, ndim=4
The image array at the right in 'side-by-side' mode or at the bottom in
'top-bottom' mode.
mode : {'side-by-side', 'top-bottom'}
Mode to pack the two images.
boundary_width : int
Linewidth of the boundary lines. Default to 0.
boundary_color : int
Grayscale of the boundary lines. Valid values are 0 (black) to 255
(white). Default to 0.
frame : bool
Whether to use a grid frame. Default to False.
Returns
-------
image : `np.array`
The image array.
"""
if image1.ndim != image2.ndim:
raise ValueError("Input image arrays must have the same number of "
"dimensions.")
if not (mode == 'side-by-side' or mode == 'top-down'):
raise ValueError("Invalid mode received. Valid modes are "
"'side-by-side' and 'top-down'.")
if mode == 'side-by-side':
if boundary_width:
# Add the boundary line
image1 = np.pad(
image1, ((0, 0), (0, 0), (0, boundary_width), (0, 0)),
'constant', constant_values=boundary_color)
image = np.concatenate((image1, image2), 2)
elif mode == 'top-down':
if boundary_width:
# Add the boundary line
image1 = np.pad(
image1, ((0, 0), (0, boundary_width), (0, 0), (0, 0)),
'constant', constant_values=boundary_color)
image = np.concatenate((image1, image2), 1)
if frame:
image = np.pad(image, ((0, 0), (boundary_width, boundary_width),
(boundary_width, boundary_width), (0, 0)),
'constant', constant_values=boundary_color)
return image |
Pair two image arrays to one single image array.
Arguments
---------
image1 : `np.array`, ndim=4
The image array at the left in 'side-by-side' mode or at the top in
'top-bottom' mode.
image2 : `np.array`, ndim=4
The image array at the right in 'side-by-side' mode or at the bottom in
'top-bottom' mode.
mode : {'side-by-side', 'top-bottom'}
Mode to pack the two images.
boundary_width : int
Linewidth of the boundary lines. Default to 0.
boundary_color : int
Grayscale of the boundary lines. Valid values are 0 (black) to 255
(white). Default to 0.
frame : bool
Whether to use a grid frame. Default to False.
Returns
-------
image : `np.array`
The image array.
| image_pair | python | salu133445/musegan | src/musegan/io_utils.py | https://github.com/salu133445/musegan/blob/master/src/musegan/io_utils.py | MIT |
def image_grid(image, grid_shape, grid_width=3, grid_color=0, frame=True):
"""
Convert a batched image array to one merged grid image array.
Arguments
---------
pianoroll : `np.array`, ndim=4
The pianoroll array. The first axis is the batch axis. The second and
third axes are the time and pitch axes, respectively, of the pianorolls.
The last axis is the track axis.
grid_shape : list or tuple of int
Shape of the image grid (height, width).
grid_width : int
Linewidth of the grid. Default to 0.
grid_color : int
Grayscale of the grid. Valid values are 0 (black) to 255 (white).
Default to 0.
frame : bool
Whether to use a grid frame. Default to False.
Returns
-------
merged : `np.array`, ndim=3
The merged image grid array.
"""
if len(grid_shape) != 2:
raise ValueError("`grid_shape` must be a list or tuple of two "
"integers.")
if image.ndim != 4:
raise ValueError("Input image array must have 4 dimensions.")
# Slice the array to get the right number of images
sliced = image[:(grid_shape[0] * grid_shape[1])]
# Add the grid lines
if grid_width:
sliced = np.pad(
sliced, ((0, 0), (grid_width, 0), (grid_width, 0), (0, 0)),
'constant', constant_values=grid_color)
# Reshape to split the first (batch) axis into two axes
reshaped = np.reshape(sliced, ((grid_shape[0], grid_shape[1])
+ sliced.shape[1:]))
# Transpose and reshape to get the image grid
transposed = np.transpose(reshaped, (0, 2, 1, 3, 4))
grid = np.reshape(
transposed, (grid_shape[0] * transposed.shape[1],
grid_shape[1] * transposed.shape[3], image.shape[-1]))
# Deal with the frame
if grid_width:
if frame:
grid = np.pad(grid, ((0, grid_width), (0, grid_width), (0, 0)),
'constant', constant_values=grid_color)
else:
grid = grid[:, grid_width:, grid_width:]
return grid |
Convert a batched image array to one merged grid image array.
Arguments
---------
pianoroll : `np.array`, ndim=4
The pianoroll array. The first axis is the batch axis. The second and
third axes are the time and pitch axes, respectively, of the pianorolls.
The last axis is the track axis.
grid_shape : list or tuple of int
Shape of the image grid (height, width).
grid_width : int
Linewidth of the grid. Default to 0.
grid_color : int
Grayscale of the grid. Valid values are 0 (black) to 255 (white).
Default to 0.
frame : bool
Whether to use a grid frame. Default to False.
Returns
-------
merged : `np.array`, ndim=3
The merged image grid array.
| image_grid | python | salu133445/musegan | src/musegan/io_utils.py | https://github.com/salu133445/musegan/blob/master/src/musegan/io_utils.py | MIT |
def save_pianoroll(filename, pianoroll, programs, is_drums, tempo,
beat_resolution, lowest_pitch):
"""Saves a batched pianoroll array to a npz file."""
if not np.issubdtype(pianoroll.dtype, np.bool_):
raise TypeError("Input pianoroll array must have a boolean dtype.")
if pianoroll.ndim != 5:
raise ValueError("Input pianoroll array must have 5 dimensions.")
if pianoroll.shape[-1] != len(programs):
raise ValueError("Length of `programs` does not match the number of "
"tracks for the input array.")
if pianoroll.shape[-1] != len(is_drums):
raise ValueError("Length of `is_drums` does not match the number of "
"tracks for the input array.")
reshaped = pianoroll.reshape(
-1, pianoroll.shape[1] * pianoroll.shape[2], pianoroll.shape[3],
pianoroll.shape[4])
# Pad to the correct pitch range and add silence between phrases
to_pad_pitch_high = 128 - lowest_pitch - pianoroll.shape[3]
padded = np.pad(
reshaped, ((0, 0), (0, pianoroll.shape[2]),
(lowest_pitch, to_pad_pitch_high), (0, 0)), 'constant')
# Reshape the batched pianoroll array to a single pianoroll array
pianoroll_ = padded.reshape(-1, padded.shape[2], padded.shape[3])
# Create the tracks
tracks = []
for idx in range(pianoroll_.shape[2]):
tracks.append(pypianoroll.Track(
pianoroll_[..., idx], programs[idx], is_drums[idx]))
# Create and save the multitrack
multitrack = pypianoroll.Multitrack(
tracks=tracks, tempo=tempo, beat_resolution=beat_resolution)
multitrack.save(filename) | Saves a batched pianoroll array to a npz file. | save_pianoroll | python | salu133445/musegan | src/musegan/io_utils.py | https://github.com/salu133445/musegan/blob/master/src/musegan/io_utils.py | MIT |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.