repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
twisted/txaws | txaws/ec2/client.py | EC2Client.describe_instances | def describe_instances(self, *instance_ids):
"""Describe current instances."""
instances = {}
for pos, instance_id in enumerate(instance_ids):
instances["InstanceId.%d" % (pos + 1)] = instance_id
query = self.query_factory(
action="DescribeInstances", creds=self.creds,
endpoint=self.endpoint, other_params=instances)
d = query.submit()
return d.addCallback(self.parser.describe_instances) | python | def describe_instances(self, *instance_ids):
"""Describe current instances."""
instances = {}
for pos, instance_id in enumerate(instance_ids):
instances["InstanceId.%d" % (pos + 1)] = instance_id
query = self.query_factory(
action="DescribeInstances", creds=self.creds,
endpoint=self.endpoint, other_params=instances)
d = query.submit()
return d.addCallback(self.parser.describe_instances) | [
"def",
"describe_instances",
"(",
"self",
",",
"*",
"instance_ids",
")",
":",
"instances",
"=",
"{",
"}",
"for",
"pos",
",",
"instance_id",
"in",
"enumerate",
"(",
"instance_ids",
")",
":",
"instances",
"[",
"\"InstanceId.%d\"",
"%",
"(",
"pos",
"+",
"1",
... | Describe current instances. | [
"Describe",
"current",
"instances",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L39-L48 | train | 36,800 |
twisted/txaws | txaws/ec2/client.py | EC2Client.run_instances | def run_instances(self, image_id, min_count, max_count,
security_groups=None, key_name=None, instance_type=None,
user_data=None, availability_zone=None, kernel_id=None,
ramdisk_id=None, subnet_id=None, security_group_ids=None):
"""Run new instances.
TODO: blockDeviceMapping, monitoring, subnetId
"""
params = {"ImageId": image_id, "MinCount": str(min_count),
"MaxCount": str(max_count)}
if key_name is not None:
params["KeyName"] = key_name
if subnet_id is not None:
params["SubnetId"] = subnet_id
if security_group_ids is not None:
for i, id in enumerate(security_group_ids):
params["SecurityGroupId.%d" % (i + 1)] = id
else:
msg = "You must specify the security_group_ids with the subnet_id"
raise ValueError(msg)
elif security_groups is not None:
for i, name in enumerate(security_groups):
params["SecurityGroup.%d" % (i + 1)] = name
else:
msg = ("You must specify either the subnet_id and "
"security_group_ids or security_groups")
raise ValueError(msg)
if user_data is not None:
params["UserData"] = b64encode(user_data)
if instance_type is not None:
params["InstanceType"] = instance_type
if availability_zone is not None:
params["Placement.AvailabilityZone"] = availability_zone
if kernel_id is not None:
params["KernelId"] = kernel_id
if ramdisk_id is not None:
params["RamdiskId"] = ramdisk_id
query = self.query_factory(
action="RunInstances", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
return d.addCallback(self.parser.run_instances) | python | def run_instances(self, image_id, min_count, max_count,
security_groups=None, key_name=None, instance_type=None,
user_data=None, availability_zone=None, kernel_id=None,
ramdisk_id=None, subnet_id=None, security_group_ids=None):
"""Run new instances.
TODO: blockDeviceMapping, monitoring, subnetId
"""
params = {"ImageId": image_id, "MinCount": str(min_count),
"MaxCount": str(max_count)}
if key_name is not None:
params["KeyName"] = key_name
if subnet_id is not None:
params["SubnetId"] = subnet_id
if security_group_ids is not None:
for i, id in enumerate(security_group_ids):
params["SecurityGroupId.%d" % (i + 1)] = id
else:
msg = "You must specify the security_group_ids with the subnet_id"
raise ValueError(msg)
elif security_groups is not None:
for i, name in enumerate(security_groups):
params["SecurityGroup.%d" % (i + 1)] = name
else:
msg = ("You must specify either the subnet_id and "
"security_group_ids or security_groups")
raise ValueError(msg)
if user_data is not None:
params["UserData"] = b64encode(user_data)
if instance_type is not None:
params["InstanceType"] = instance_type
if availability_zone is not None:
params["Placement.AvailabilityZone"] = availability_zone
if kernel_id is not None:
params["KernelId"] = kernel_id
if ramdisk_id is not None:
params["RamdiskId"] = ramdisk_id
query = self.query_factory(
action="RunInstances", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
return d.addCallback(self.parser.run_instances) | [
"def",
"run_instances",
"(",
"self",
",",
"image_id",
",",
"min_count",
",",
"max_count",
",",
"security_groups",
"=",
"None",
",",
"key_name",
"=",
"None",
",",
"instance_type",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"availability_zone",
"=",
"Non... | Run new instances.
TODO: blockDeviceMapping, monitoring, subnetId | [
"Run",
"new",
"instances",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L50-L91 | train | 36,801 |
twisted/txaws | txaws/ec2/client.py | EC2Client.get_console_output | def get_console_output(self, instance_id):
"""Get the console output for a single instance."""
InstanceIDParam = {"InstanceId": instance_id}
query = self.query_factory(
action="GetConsoleOutput", creds=self.creds,
endpoint=self.endpoint, other_params=InstanceIDParam)
d = query.submit()
return d.addCallback(self.parser.get_console_output) | python | def get_console_output(self, instance_id):
"""Get the console output for a single instance."""
InstanceIDParam = {"InstanceId": instance_id}
query = self.query_factory(
action="GetConsoleOutput", creds=self.creds,
endpoint=self.endpoint, other_params=InstanceIDParam)
d = query.submit()
return d.addCallback(self.parser.get_console_output) | [
"def",
"get_console_output",
"(",
"self",
",",
"instance_id",
")",
":",
"InstanceIDParam",
"=",
"{",
"\"InstanceId\"",
":",
"instance_id",
"}",
"query",
"=",
"self",
".",
"query_factory",
"(",
"action",
"=",
"\"GetConsoleOutput\"",
",",
"creds",
"=",
"self",
"... | Get the console output for a single instance. | [
"Get",
"the",
"console",
"output",
"for",
"a",
"single",
"instance",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L109-L116 | train | 36,802 |
twisted/txaws | txaws/ec2/client.py | EC2Client.describe_security_groups | def describe_security_groups(self, *names):
"""Describe security groups.
@param names: Optionally, a list of security group names to describe.
Defaults to all security groups in the account.
@return: A C{Deferred} that will fire with a list of L{SecurityGroup}s
retrieved from the cloud.
"""
group_names = {}
if names:
group_names = dict([("GroupName.%d" % (i + 1), name)
for i, name in enumerate(names)])
query = self.query_factory(
action="DescribeSecurityGroups", creds=self.creds,
endpoint=self.endpoint, other_params=group_names)
d = query.submit()
return d.addCallback(self.parser.describe_security_groups) | python | def describe_security_groups(self, *names):
"""Describe security groups.
@param names: Optionally, a list of security group names to describe.
Defaults to all security groups in the account.
@return: A C{Deferred} that will fire with a list of L{SecurityGroup}s
retrieved from the cloud.
"""
group_names = {}
if names:
group_names = dict([("GroupName.%d" % (i + 1), name)
for i, name in enumerate(names)])
query = self.query_factory(
action="DescribeSecurityGroups", creds=self.creds,
endpoint=self.endpoint, other_params=group_names)
d = query.submit()
return d.addCallback(self.parser.describe_security_groups) | [
"def",
"describe_security_groups",
"(",
"self",
",",
"*",
"names",
")",
":",
"group_names",
"=",
"{",
"}",
"if",
"names",
":",
"group_names",
"=",
"dict",
"(",
"[",
"(",
"\"GroupName.%d\"",
"%",
"(",
"i",
"+",
"1",
")",
",",
"name",
")",
"for",
"i",
... | Describe security groups.
@param names: Optionally, a list of security group names to describe.
Defaults to all security groups in the account.
@return: A C{Deferred} that will fire with a list of L{SecurityGroup}s
retrieved from the cloud. | [
"Describe",
"security",
"groups",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L118-L134 | train | 36,803 |
twisted/txaws | txaws/ec2/client.py | EC2Client.create_security_group | def create_security_group(self, name, description, vpc_id=None):
"""Create security group.
@param name: Name of the new security group.
@param description: Description of the new security group.
@param vpc_id: ID of the VPC to which the security group will belong.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation.
"""
parameters = {"GroupName": name, "GroupDescription": description}
if vpc_id:
parameters["VpcId"] = vpc_id
query = self.query_factory(
action="CreateSecurityGroup", creds=self.creds,
endpoint=self.endpoint, other_params=parameters)
d = query.submit()
return d.addCallback(self.parser.create_security_group) | python | def create_security_group(self, name, description, vpc_id=None):
"""Create security group.
@param name: Name of the new security group.
@param description: Description of the new security group.
@param vpc_id: ID of the VPC to which the security group will belong.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation.
"""
parameters = {"GroupName": name, "GroupDescription": description}
if vpc_id:
parameters["VpcId"] = vpc_id
query = self.query_factory(
action="CreateSecurityGroup", creds=self.creds,
endpoint=self.endpoint, other_params=parameters)
d = query.submit()
return d.addCallback(self.parser.create_security_group) | [
"def",
"create_security_group",
"(",
"self",
",",
"name",
",",
"description",
",",
"vpc_id",
"=",
"None",
")",
":",
"parameters",
"=",
"{",
"\"GroupName\"",
":",
"name",
",",
"\"GroupDescription\"",
":",
"description",
"}",
"if",
"vpc_id",
":",
"parameters",
... | Create security group.
@param name: Name of the new security group.
@param description: Description of the new security group.
@param vpc_id: ID of the VPC to which the security group will belong.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation. | [
"Create",
"security",
"group",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L136-L152 | train | 36,804 |
twisted/txaws | txaws/ec2/client.py | EC2Client.describe_volumes | def describe_volumes(self, *volume_ids):
"""Describe available volumes."""
volumeset = {}
for pos, volume_id in enumerate(volume_ids):
volumeset["VolumeId.%d" % (pos + 1)] = volume_id
query = self.query_factory(
action="DescribeVolumes", creds=self.creds, endpoint=self.endpoint,
other_params=volumeset)
d = query.submit()
return d.addCallback(self.parser.describe_volumes) | python | def describe_volumes(self, *volume_ids):
"""Describe available volumes."""
volumeset = {}
for pos, volume_id in enumerate(volume_ids):
volumeset["VolumeId.%d" % (pos + 1)] = volume_id
query = self.query_factory(
action="DescribeVolumes", creds=self.creds, endpoint=self.endpoint,
other_params=volumeset)
d = query.submit()
return d.addCallback(self.parser.describe_volumes) | [
"def",
"describe_volumes",
"(",
"self",
",",
"*",
"volume_ids",
")",
":",
"volumeset",
"=",
"{",
"}",
"for",
"pos",
",",
"volume_id",
"in",
"enumerate",
"(",
"volume_ids",
")",
":",
"volumeset",
"[",
"\"VolumeId.%d\"",
"%",
"(",
"pos",
"+",
"1",
")",
"... | Describe available volumes. | [
"Describe",
"available",
"volumes",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L355-L364 | train | 36,805 |
twisted/txaws | txaws/ec2/client.py | EC2Client.create_volume | def create_volume(self, availability_zone, size=None, snapshot_id=None):
"""Create a new volume."""
params = {"AvailabilityZone": availability_zone}
if ((snapshot_id is None and size is None) or
(snapshot_id is not None and size is not None)):
raise ValueError("Please provide either size or snapshot_id")
if size is not None:
params["Size"] = str(size)
if snapshot_id is not None:
params["SnapshotId"] = snapshot_id
query = self.query_factory(
action="CreateVolume", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
return d.addCallback(self.parser.create_volume) | python | def create_volume(self, availability_zone, size=None, snapshot_id=None):
"""Create a new volume."""
params = {"AvailabilityZone": availability_zone}
if ((snapshot_id is None and size is None) or
(snapshot_id is not None and size is not None)):
raise ValueError("Please provide either size or snapshot_id")
if size is not None:
params["Size"] = str(size)
if snapshot_id is not None:
params["SnapshotId"] = snapshot_id
query = self.query_factory(
action="CreateVolume", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
return d.addCallback(self.parser.create_volume) | [
"def",
"create_volume",
"(",
"self",
",",
"availability_zone",
",",
"size",
"=",
"None",
",",
"snapshot_id",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"AvailabilityZone\"",
":",
"availability_zone",
"}",
"if",
"(",
"(",
"snapshot_id",
"is",
"None",
"and",... | Create a new volume. | [
"Create",
"a",
"new",
"volume",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L366-L380 | train | 36,806 |
twisted/txaws | txaws/ec2/client.py | EC2Client.describe_snapshots | def describe_snapshots(self, *snapshot_ids):
"""Describe available snapshots.
TODO: ownerSet, restorableBySet
"""
snapshot_set = {}
for pos, snapshot_id in enumerate(snapshot_ids):
snapshot_set["SnapshotId.%d" % (pos + 1)] = snapshot_id
query = self.query_factory(
action="DescribeSnapshots", creds=self.creds,
endpoint=self.endpoint, other_params=snapshot_set)
d = query.submit()
return d.addCallback(self.parser.snapshots) | python | def describe_snapshots(self, *snapshot_ids):
"""Describe available snapshots.
TODO: ownerSet, restorableBySet
"""
snapshot_set = {}
for pos, snapshot_id in enumerate(snapshot_ids):
snapshot_set["SnapshotId.%d" % (pos + 1)] = snapshot_id
query = self.query_factory(
action="DescribeSnapshots", creds=self.creds,
endpoint=self.endpoint, other_params=snapshot_set)
d = query.submit()
return d.addCallback(self.parser.snapshots) | [
"def",
"describe_snapshots",
"(",
"self",
",",
"*",
"snapshot_ids",
")",
":",
"snapshot_set",
"=",
"{",
"}",
"for",
"pos",
",",
"snapshot_id",
"in",
"enumerate",
"(",
"snapshot_ids",
")",
":",
"snapshot_set",
"[",
"\"SnapshotId.%d\"",
"%",
"(",
"pos",
"+",
... | Describe available snapshots.
TODO: ownerSet, restorableBySet | [
"Describe",
"available",
"snapshots",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L389-L401 | train | 36,807 |
twisted/txaws | txaws/ec2/client.py | EC2Client.delete_snapshot | def delete_snapshot(self, snapshot_id):
"""Remove a previously created snapshot."""
query = self.query_factory(
action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
other_params={"SnapshotId": snapshot_id})
d = query.submit()
return d.addCallback(self.parser.truth_return) | python | def delete_snapshot(self, snapshot_id):
"""Remove a previously created snapshot."""
query = self.query_factory(
action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
other_params={"SnapshotId": snapshot_id})
d = query.submit()
return d.addCallback(self.parser.truth_return) | [
"def",
"delete_snapshot",
"(",
"self",
",",
"snapshot_id",
")",
":",
"query",
"=",
"self",
".",
"query_factory",
"(",
"action",
"=",
"\"DeleteSnapshot\"",
",",
"creds",
"=",
"self",
".",
"creds",
",",
"endpoint",
"=",
"self",
".",
"endpoint",
",",
"other_p... | Remove a previously created snapshot. | [
"Remove",
"a",
"previously",
"created",
"snapshot",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L414-L420 | train | 36,808 |
twisted/txaws | txaws/ec2/client.py | EC2Client.describe_keypairs | def describe_keypairs(self, *keypair_names):
"""Returns information about key pairs available."""
keypairs = {}
for index, keypair_name in enumerate(keypair_names):
keypairs["KeyName.%d" % (index + 1)] = keypair_name
query = self.query_factory(
action="DescribeKeyPairs", creds=self.creds,
endpoint=self.endpoint, other_params=keypairs)
d = query.submit()
return d.addCallback(self.parser.describe_keypairs) | python | def describe_keypairs(self, *keypair_names):
"""Returns information about key pairs available."""
keypairs = {}
for index, keypair_name in enumerate(keypair_names):
keypairs["KeyName.%d" % (index + 1)] = keypair_name
query = self.query_factory(
action="DescribeKeyPairs", creds=self.creds,
endpoint=self.endpoint, other_params=keypairs)
d = query.submit()
return d.addCallback(self.parser.describe_keypairs) | [
"def",
"describe_keypairs",
"(",
"self",
",",
"*",
"keypair_names",
")",
":",
"keypairs",
"=",
"{",
"}",
"for",
"index",
",",
"keypair_name",
"in",
"enumerate",
"(",
"keypair_names",
")",
":",
"keypairs",
"[",
"\"KeyName.%d\"",
"%",
"(",
"index",
"+",
"1",... | Returns information about key pairs available. | [
"Returns",
"information",
"about",
"key",
"pairs",
"available",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L431-L440 | train | 36,809 |
twisted/txaws | txaws/ec2/client.py | EC2Client.create_keypair | def create_keypair(self, keypair_name):
"""
Create a new 2048 bit RSA key pair and return a unique ID that can be
used to reference the created key pair when launching new instances.
"""
query = self.query_factory(
action="CreateKeyPair", creds=self.creds, endpoint=self.endpoint,
other_params={"KeyName": keypair_name})
d = query.submit()
return d.addCallback(self.parser.create_keypair) | python | def create_keypair(self, keypair_name):
"""
Create a new 2048 bit RSA key pair and return a unique ID that can be
used to reference the created key pair when launching new instances.
"""
query = self.query_factory(
action="CreateKeyPair", creds=self.creds, endpoint=self.endpoint,
other_params={"KeyName": keypair_name})
d = query.submit()
return d.addCallback(self.parser.create_keypair) | [
"def",
"create_keypair",
"(",
"self",
",",
"keypair_name",
")",
":",
"query",
"=",
"self",
".",
"query_factory",
"(",
"action",
"=",
"\"CreateKeyPair\"",
",",
"creds",
"=",
"self",
".",
"creds",
",",
"endpoint",
"=",
"self",
".",
"endpoint",
",",
"other_pa... | Create a new 2048 bit RSA key pair and return a unique ID that can be
used to reference the created key pair when launching new instances. | [
"Create",
"a",
"new",
"2048",
"bit",
"RSA",
"key",
"pair",
"and",
"return",
"a",
"unique",
"ID",
"that",
"can",
"be",
"used",
"to",
"reference",
"the",
"created",
"key",
"pair",
"when",
"launching",
"new",
"instances",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L442-L451 | train | 36,810 |
twisted/txaws | txaws/ec2/client.py | EC2Client.allocate_address | def allocate_address(self):
"""
Acquire an elastic IP address to be attached subsequently to EC2
instances.
@return: the IP address allocated.
"""
# XXX remove empty other_params
query = self.query_factory(
action="AllocateAddress", creds=self.creds, endpoint=self.endpoint,
other_params={})
d = query.submit()
return d.addCallback(self.parser.allocate_address) | python | def allocate_address(self):
"""
Acquire an elastic IP address to be attached subsequently to EC2
instances.
@return: the IP address allocated.
"""
# XXX remove empty other_params
query = self.query_factory(
action="AllocateAddress", creds=self.creds, endpoint=self.endpoint,
other_params={})
d = query.submit()
return d.addCallback(self.parser.allocate_address) | [
"def",
"allocate_address",
"(",
"self",
")",
":",
"# XXX remove empty other_params",
"query",
"=",
"self",
".",
"query_factory",
"(",
"action",
"=",
"\"AllocateAddress\"",
",",
"creds",
"=",
"self",
".",
"creds",
",",
"endpoint",
"=",
"self",
".",
"endpoint",
... | Acquire an elastic IP address to be attached subsequently to EC2
instances.
@return: the IP address allocated. | [
"Acquire",
"an",
"elastic",
"IP",
"address",
"to",
"be",
"attached",
"subsequently",
"to",
"EC2",
"instances",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L485-L497 | train | 36,811 |
twisted/txaws | txaws/ec2/client.py | EC2Client.describe_addresses | def describe_addresses(self, *addresses):
"""
List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}.
"""
address_set = {}
for pos, address in enumerate(addresses):
address_set["PublicIp.%d" % (pos + 1)] = address
query = self.query_factory(
action="DescribeAddresses", creds=self.creds,
endpoint=self.endpoint, other_params=address_set)
d = query.submit()
return d.addCallback(self.parser.describe_addresses) | python | def describe_addresses(self, *addresses):
"""
List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}.
"""
address_set = {}
for pos, address in enumerate(addresses):
address_set["PublicIp.%d" % (pos + 1)] = address
query = self.query_factory(
action="DescribeAddresses", creds=self.creds,
endpoint=self.endpoint, other_params=address_set)
d = query.submit()
return d.addCallback(self.parser.describe_addresses) | [
"def",
"describe_addresses",
"(",
"self",
",",
"*",
"addresses",
")",
":",
"address_set",
"=",
"{",
"}",
"for",
"pos",
",",
"address",
"in",
"enumerate",
"(",
"addresses",
")",
":",
"address_set",
"[",
"\"PublicIp.%d\"",
"%",
"(",
"pos",
"+",
"1",
")",
... | List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}. | [
"List",
"the",
"elastic",
"IPs",
"allocated",
"in",
"this",
"account",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L537-L553 | train | 36,812 |
twisted/txaws | txaws/ec2/client.py | Parser.describe_instances | def describe_instances(self, xml_bytes):
"""
Parse the reservations XML payload that is returned from an AWS
describeInstances API call.
Instead of returning the reservations as the "top-most" object, we
return the object that most developers and their code will be
interested in: the instances. In instances reservation is available on
the instance object.
The following instance attributes are optional:
* ami_launch_index
* key_name
* kernel_id
* product_codes
* ramdisk_id
* reason
@param xml_bytes: raw XML payload from AWS.
"""
root = XML(xml_bytes)
results = []
# May be a more elegant way to do this:
for reservation_data in root.find("reservationSet"):
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=reservation_data.findtext("reservationId"),
owner_id=reservation_data.findtext("ownerId"))
# Get the list of instances.
instances = self.instances_set(
reservation_data, reservation)
results.extend(instances)
return results | python | def describe_instances(self, xml_bytes):
"""
Parse the reservations XML payload that is returned from an AWS
describeInstances API call.
Instead of returning the reservations as the "top-most" object, we
return the object that most developers and their code will be
interested in: the instances. In instances reservation is available on
the instance object.
The following instance attributes are optional:
* ami_launch_index
* key_name
* kernel_id
* product_codes
* ramdisk_id
* reason
@param xml_bytes: raw XML payload from AWS.
"""
root = XML(xml_bytes)
results = []
# May be a more elegant way to do this:
for reservation_data in root.find("reservationSet"):
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=reservation_data.findtext("reservationId"),
owner_id=reservation_data.findtext("ownerId"))
# Get the list of instances.
instances = self.instances_set(
reservation_data, reservation)
results.extend(instances)
return results | [
"def",
"describe_instances",
"(",
"self",
",",
"xml_bytes",
")",
":",
"root",
"=",
"XML",
"(",
"xml_bytes",
")",
"results",
"=",
"[",
"]",
"# May be a more elegant way to do this:",
"for",
"reservation_data",
"in",
"root",
".",
"find",
"(",
"\"reservationSet\"",
... | Parse the reservations XML payload that is returned from an AWS
describeInstances API call.
Instead of returning the reservations as the "top-most" object, we
return the object that most developers and their code will be
interested in: the instances. In instances reservation is available on
the instance object.
The following instance attributes are optional:
* ami_launch_index
* key_name
* kernel_id
* product_codes
* ramdisk_id
* reason
@param xml_bytes: raw XML payload from AWS. | [
"Parse",
"the",
"reservations",
"XML",
"payload",
"that",
"is",
"returned",
"from",
"an",
"AWS",
"describeInstances",
"API",
"call",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L626-L658 | train | 36,813 |
twisted/txaws | txaws/ec2/client.py | Parser.run_instances | def run_instances(self, xml_bytes):
"""
Parse the reservations XML payload that is returned from an AWS
RunInstances API call.
@param xml_bytes: raw XML bytes with a C{RunInstancesResponse} root
element.
"""
root = XML(xml_bytes)
# Get the security group information.
groups = []
for group_data in root.find("groupSet"):
group_id = group_data.findtext("groupId")
groups.append(group_id)
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=root.findtext("reservationId"),
owner_id=root.findtext("ownerId"),
groups=groups)
# Get the list of instances.
instances = self.instances_set(root, reservation)
return instances | python | def run_instances(self, xml_bytes):
"""
Parse the reservations XML payload that is returned from an AWS
RunInstances API call.
@param xml_bytes: raw XML bytes with a C{RunInstancesResponse} root
element.
"""
root = XML(xml_bytes)
# Get the security group information.
groups = []
for group_data in root.find("groupSet"):
group_id = group_data.findtext("groupId")
groups.append(group_id)
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=root.findtext("reservationId"),
owner_id=root.findtext("ownerId"),
groups=groups)
# Get the list of instances.
instances = self.instances_set(root, reservation)
return instances | [
"def",
"run_instances",
"(",
"self",
",",
"xml_bytes",
")",
":",
"root",
"=",
"XML",
"(",
"xml_bytes",
")",
"# Get the security group information.",
"groups",
"=",
"[",
"]",
"for",
"group_data",
"in",
"root",
".",
"find",
"(",
"\"groupSet\"",
")",
":",
"grou... | Parse the reservations XML payload that is returned from an AWS
RunInstances API call.
@param xml_bytes: raw XML bytes with a C{RunInstancesResponse} root
element. | [
"Parse",
"the",
"reservations",
"XML",
"payload",
"that",
"is",
"returned",
"from",
"an",
"AWS",
"RunInstances",
"API",
"call",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L660-L681 | train | 36,814 |
twisted/txaws | txaws/ec2/client.py | Signature.compute | def compute(self):
"""Compute and return the signature according to the given data."""
if "Signature" in self.params:
raise RuntimeError("Existing signature in parameters")
if self.signature_version is not None:
version = self.signature_version
else:
version = self.params["SignatureVersion"]
if str(version) == "1":
bytes = self.old_signing_text()
hash_type = "sha1"
elif str(version) == "2":
bytes = self.signing_text()
if self.signature_method is not None:
signature_method = self.signature_method
else:
signature_method = self.params["SignatureMethod"]
hash_type = signature_method[len("Hmac"):].lower()
else:
raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
return self.creds.sign(bytes, hash_type) | python | def compute(self):
"""Compute and return the signature according to the given data."""
if "Signature" in self.params:
raise RuntimeError("Existing signature in parameters")
if self.signature_version is not None:
version = self.signature_version
else:
version = self.params["SignatureVersion"]
if str(version) == "1":
bytes = self.old_signing_text()
hash_type = "sha1"
elif str(version) == "2":
bytes = self.signing_text()
if self.signature_method is not None:
signature_method = self.signature_method
else:
signature_method = self.params["SignatureMethod"]
hash_type = signature_method[len("Hmac"):].lower()
else:
raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
return self.creds.sign(bytes, hash_type) | [
"def",
"compute",
"(",
"self",
")",
":",
"if",
"\"Signature\"",
"in",
"self",
".",
"params",
":",
"raise",
"RuntimeError",
"(",
"\"Existing signature in parameters\"",
")",
"if",
"self",
".",
"signature_version",
"is",
"not",
"None",
":",
"version",
"=",
"self... | Compute and return the signature according to the given data. | [
"Compute",
"and",
"return",
"the",
"signature",
"according",
"to",
"the",
"given",
"data",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L1077-L1097 | train | 36,815 |
twisted/txaws | txaws/ec2/client.py | Signature.old_signing_text | def old_signing_text(self):
"""Return the text needed for signing using SignatureVersion 1."""
result = []
lower_cmp = lambda x, y: cmp(x[0].lower(), y[0].lower())
for key, value in sorted(self.params.items(), cmp=lower_cmp):
result.append("%s%s" % (key, value))
return "".join(result) | python | def old_signing_text(self):
"""Return the text needed for signing using SignatureVersion 1."""
result = []
lower_cmp = lambda x, y: cmp(x[0].lower(), y[0].lower())
for key, value in sorted(self.params.items(), cmp=lower_cmp):
result.append("%s%s" % (key, value))
return "".join(result) | [
"def",
"old_signing_text",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"lower_cmp",
"=",
"lambda",
"x",
",",
"y",
":",
"cmp",
"(",
"x",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"y",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
"for",
... | Return the text needed for signing using SignatureVersion 1. | [
"Return",
"the",
"text",
"needed",
"for",
"signing",
"using",
"SignatureVersion",
"1",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L1099-L1105 | train | 36,816 |
twisted/txaws | txaws/ec2/client.py | Signature.signing_text | def signing_text(self):
"""Return the text to be signed when signing the query."""
result = "%s\n%s\n%s\n%s" % (self.endpoint.method,
self.endpoint.get_canonical_host(),
self.endpoint.path,
self.get_canonical_query_params())
return result | python | def signing_text(self):
"""Return the text to be signed when signing the query."""
result = "%s\n%s\n%s\n%s" % (self.endpoint.method,
self.endpoint.get_canonical_host(),
self.endpoint.path,
self.get_canonical_query_params())
return result | [
"def",
"signing_text",
"(",
"self",
")",
":",
"result",
"=",
"\"%s\\n%s\\n%s\\n%s\"",
"%",
"(",
"self",
".",
"endpoint",
".",
"method",
",",
"self",
".",
"endpoint",
".",
"get_canonical_host",
"(",
")",
",",
"self",
".",
"endpoint",
".",
"path",
",",
"se... | Return the text to be signed when signing the query. | [
"Return",
"the",
"text",
"to",
"be",
"signed",
"when",
"signing",
"the",
"query",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L1107-L1113 | train | 36,817 |
twisted/txaws | txaws/ec2/client.py | Signature.encode | def encode(self, string):
"""Encode a_string as per the canonicalisation encoding rules.
See the AWS dev reference page 186 (2009-11-30 version).
@return: a_string encoded.
"""
if isinstance(string, unicode):
string = string.encode("utf-8")
return quote(string, safe="~") | python | def encode(self, string):
"""Encode a_string as per the canonicalisation encoding rules.
See the AWS dev reference page 186 (2009-11-30 version).
@return: a_string encoded.
"""
if isinstance(string, unicode):
string = string.encode("utf-8")
return quote(string, safe="~") | [
"def",
"encode",
"(",
"self",
",",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"unicode",
")",
":",
"string",
"=",
"string",
".",
"encode",
"(",
"\"utf-8\"",
")",
"return",
"quote",
"(",
"string",
",",
"safe",
"=",
"\"~\"",
")"
] | Encode a_string as per the canonicalisation encoding rules.
See the AWS dev reference page 186 (2009-11-30 version).
@return: a_string encoded. | [
"Encode",
"a_string",
"as",
"per",
"the",
"canonicalisation",
"encoding",
"rules",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L1122-L1130 | train | 36,818 |
twisted/txaws | txaws/s3/model.py | MultipartInitiationResponse.from_xml | def from_xml(cls, xml_bytes):
"""
Create an instance of this from XML bytes.
@param xml_bytes: C{str} bytes of XML to parse
@return: an instance of L{MultipartInitiationResponse}
"""
root = XML(xml_bytes)
return cls(root.findtext('Bucket'),
root.findtext('Key'),
root.findtext('UploadId')) | python | def from_xml(cls, xml_bytes):
"""
Create an instance of this from XML bytes.
@param xml_bytes: C{str} bytes of XML to parse
@return: an instance of L{MultipartInitiationResponse}
"""
root = XML(xml_bytes)
return cls(root.findtext('Bucket'),
root.findtext('Key'),
root.findtext('UploadId')) | [
"def",
"from_xml",
"(",
"cls",
",",
"xml_bytes",
")",
":",
"root",
"=",
"XML",
"(",
"xml_bytes",
")",
"return",
"cls",
"(",
"root",
".",
"findtext",
"(",
"'Bucket'",
")",
",",
"root",
".",
"findtext",
"(",
"'Key'",
")",
",",
"root",
".",
"findtext",
... | Create an instance of this from XML bytes.
@param xml_bytes: C{str} bytes of XML to parse
@return: an instance of L{MultipartInitiationResponse} | [
"Create",
"an",
"instance",
"of",
"this",
"from",
"XML",
"bytes",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/model.py#L178-L188 | train | 36,819 |
T-002/pycast | pycast/common/matrix.py | Matrix._initialize_with_array | def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data) | python | def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data) | [
"def",
"_initialize_with_array",
"(",
"self",
",",
"data",
",",
"rowBased",
"=",
"True",
")",
":",
"if",
"rowBased",
":",
"self",
".",
"matrix",
"=",
"[",
"]",
"if",
"len",
"(",
"data",
")",
"!=",
"self",
".",
"_rows",
":",
"raise",
"ValueError",
"("... | Set the matrix values from a two dimensional list. | [
"Set",
"the",
"matrix",
"values",
"from",
"a",
"two",
"dimensional",
"list",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L154-L172 | train | 36,820 |
T-002/pycast | pycast/common/matrix.py | Matrix.from_timeseries | def from_timeseries(cls, timeSeries):
"""Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries
:param TimeSeries timeSeries: The TimeSeries, which should be used to
create a new Matrix.
:return: A Matrix with the values of the timeSeries. Each row of
the Matrix represents one entry of the timeSeries.
The time of an entry is ignored in the matrix.
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError`, if the timeSeries is empty.
"""
width = 1
if isinstance(timeSeries, MultiDimensionalTimeSeries):
width = timeSeries.dimension_count()
matrixData = [[] for dummy in xrange(width)]
for entry in timeSeries:
for col in xrange(1, len(entry)):
matrixData[col - 1].append(entry[col])
if not matrixData[0]:
raise ValueError("Cannot create Matrix from empty Timeseries")
mtrx = Matrix.from_two_dim_array(len(matrixData), len(matrixData[0]), matrixData)
# mtrx.initialize(matrixData, rowBased=False)
return mtrx | python | def from_timeseries(cls, timeSeries):
"""Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries
:param TimeSeries timeSeries: The TimeSeries, which should be used to
create a new Matrix.
:return: A Matrix with the values of the timeSeries. Each row of
the Matrix represents one entry of the timeSeries.
The time of an entry is ignored in the matrix.
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError`, if the timeSeries is empty.
"""
width = 1
if isinstance(timeSeries, MultiDimensionalTimeSeries):
width = timeSeries.dimension_count()
matrixData = [[] for dummy in xrange(width)]
for entry in timeSeries:
for col in xrange(1, len(entry)):
matrixData[col - 1].append(entry[col])
if not matrixData[0]:
raise ValueError("Cannot create Matrix from empty Timeseries")
mtrx = Matrix.from_two_dim_array(len(matrixData), len(matrixData[0]), matrixData)
# mtrx.initialize(matrixData, rowBased=False)
return mtrx | [
"def",
"from_timeseries",
"(",
"cls",
",",
"timeSeries",
")",
":",
"width",
"=",
"1",
"if",
"isinstance",
"(",
"timeSeries",
",",
"MultiDimensionalTimeSeries",
")",
":",
"width",
"=",
"timeSeries",
".",
"dimension_count",
"(",
")",
"matrixData",
"=",
"[",
"[... | Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries
:param TimeSeries timeSeries: The TimeSeries, which should be used to
create a new Matrix.
:return: A Matrix with the values of the timeSeries. Each row of
the Matrix represents one entry of the timeSeries.
The time of an entry is ignored in the matrix.
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError`, if the timeSeries is empty. | [
"Create",
"a",
"new",
"Matrix",
"instance",
"from",
"a",
"TimeSeries",
"or",
"MultiDimensionalTimeSeries"
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L175-L202 | train | 36,821 |
T-002/pycast | pycast/common/matrix.py | Matrix.from_two_dim_array | def from_two_dim_array(cls, cols, rows, twoDimArray):
"""Create a new Matrix instance from a two dimensional array.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
:param list twoDimArray: A two dimensional column based array
with the values of the matrix.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- the size of the parameter does not match with the size of
the Matrix.
"""
return Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False) | python | def from_two_dim_array(cls, cols, rows, twoDimArray):
"""Create a new Matrix instance from a two dimensional array.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
:param list twoDimArray: A two dimensional column based array
with the values of the matrix.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- the size of the parameter does not match with the size of
the Matrix.
"""
return Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False) | [
"def",
"from_two_dim_array",
"(",
"cls",
",",
"cols",
",",
"rows",
",",
"twoDimArray",
")",
":",
"return",
"Matrix",
"(",
"cols",
",",
"rows",
",",
"twoDimArray",
",",
"rowBased",
"=",
"False",
",",
"isOneDimArray",
"=",
"False",
")"
] | Create a new Matrix instance from a two dimensional array.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
:param list twoDimArray: A two dimensional column based array
with the values of the matrix.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- the size of the parameter does not match with the size of
the Matrix. | [
"Create",
"a",
"new",
"Matrix",
"instance",
"from",
"a",
"two",
"dimensional",
"array",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L205-L218 | train | 36,822 |
T-002/pycast | pycast/common/matrix.py | Matrix.get_matrix_from_list | def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
"""Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False).
"""
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix | python | def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
"""Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False).
"""
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix | [
"def",
"get_matrix_from_list",
"(",
"self",
",",
"rows",
",",
"columns",
",",
"matrix_list",
",",
"rowBased",
"=",
"True",
")",
":",
"resultMatrix",
"=",
"Matrix",
"(",
"columns",
",",
"rows",
",",
"matrix_list",
",",
"rowBased",
")",
"return",
"resultMatrix... | Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False). | [
"Create",
"a",
"new",
"Matrix",
"instance",
"from",
"a",
"matrix_list",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L274-L290 | train | 36,823 |
T-002/pycast | pycast/common/matrix.py | Matrix.set_value | def set_value(self, column, row, value):
"""Set the value of the Matrix at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
:param numeric value: The new value at the given column/row
:raise: Raises an :py:exc:`IndexError` if the index is out of xrange.
"""
self.matrix[column][row] = value | python | def set_value(self, column, row, value):
"""Set the value of the Matrix at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
:param numeric value: The new value at the given column/row
:raise: Raises an :py:exc:`IndexError` if the index is out of xrange.
"""
self.matrix[column][row] = value | [
"def",
"set_value",
"(",
"self",
",",
"column",
",",
"row",
",",
"value",
")",
":",
"self",
".",
"matrix",
"[",
"column",
"]",
"[",
"row",
"]",
"=",
"value"
] | Set the value of the Matrix at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
:param numeric value: The new value at the given column/row
:raise: Raises an :py:exc:`IndexError` if the index is out of xrange. | [
"Set",
"the",
"value",
"of",
"the",
"Matrix",
"at",
"the",
"specified",
"column",
"and",
"row",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L292-L301 | train | 36,824 |
T-002/pycast | pycast/common/matrix.py | Matrix.invers | def invers(self):
"""Return the invers matrix, if it can be calculated
:return: Returns a new Matrix containing the invers
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if the matrix is not inversible
:note: Only a squared matrix with a determinant != 0 can be inverted.
:todo: Reduce amount of create and copy operations
"""
if self._columns != self._rows:
raise ValueError("A square matrix is needed")
mArray = self.get_array(False)
appList = [0] * self._columns
# add identity matrix to array in order to use gauss jordan algorithm
for col in xrange(self._columns):
mArray.append(appList[:])
mArray[self._columns + col][col] = 1
# create new Matrix and execute gass jordan algorithm
exMatrix = Matrix.from_two_dim_array(2 * self._columns, self._rows, mArray)
gjResult = exMatrix.gauss_jordan()
# remove identity matrix from left side
# TODO Implement slicing directly for Matrix
gjResult.matrix = gjResult.matrix[self._columns:]
gjResult._columns = len(gjResult.matrix)
return gjResult | python | def invers(self):
"""Return the invers matrix, if it can be calculated
:return: Returns a new Matrix containing the invers
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if the matrix is not inversible
:note: Only a squared matrix with a determinant != 0 can be inverted.
:todo: Reduce amount of create and copy operations
"""
if self._columns != self._rows:
raise ValueError("A square matrix is needed")
mArray = self.get_array(False)
appList = [0] * self._columns
# add identity matrix to array in order to use gauss jordan algorithm
for col in xrange(self._columns):
mArray.append(appList[:])
mArray[self._columns + col][col] = 1
# create new Matrix and execute gass jordan algorithm
exMatrix = Matrix.from_two_dim_array(2 * self._columns, self._rows, mArray)
gjResult = exMatrix.gauss_jordan()
# remove identity matrix from left side
# TODO Implement slicing directly for Matrix
gjResult.matrix = gjResult.matrix[self._columns:]
gjResult._columns = len(gjResult.matrix)
return gjResult | [
"def",
"invers",
"(",
"self",
")",
":",
"if",
"self",
".",
"_columns",
"!=",
"self",
".",
"_rows",
":",
"raise",
"ValueError",
"(",
"\"A square matrix is needed\"",
")",
"mArray",
"=",
"self",
".",
"get_array",
"(",
"False",
")",
"appList",
"=",
"[",
"0"... | Return the invers matrix, if it can be calculated
:return: Returns a new Matrix containing the invers
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if the matrix is not inversible
:note: Only a squared matrix with a determinant != 0 can be inverted.
:todo: Reduce amount of create and copy operations | [
"Return",
"the",
"invers",
"matrix",
"if",
"it",
"can",
"be",
"calculated"
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L333-L361 | train | 36,825 |
T-002/pycast | pycast/common/matrix.py | Matrix.flatten | def flatten(self):
"""
If the current Matrix consists of Blockmatrixes as elementes method
flattens the Matrix into one Matrix only consisting of the 2nd level
elements
[[[1 2] [[3 4] to [[1 2 3 4]
[5 6]] [7 8]]] [5 6 7 8]]
"""
blocksize = self.get_array()[0][0].get_width()
width = self.get_width() * blocksize
columnsNew = [[] for dummy in xrange(width)]
for row in self.get_array():
index = 0
for submatrix in row:
for column in submatrix.get_array(False):
columnsNew[index] += column
index += 1
columnsFlat = sum(columnsNew, [])
return Matrix(width, len(columnsNew[0]), columnsFlat, rowBased=False) | python | def flatten(self):
"""
If the current Matrix consists of Blockmatrixes as elementes method
flattens the Matrix into one Matrix only consisting of the 2nd level
elements
[[[1 2] [[3 4] to [[1 2 3 4]
[5 6]] [7 8]]] [5 6 7 8]]
"""
blocksize = self.get_array()[0][0].get_width()
width = self.get_width() * blocksize
columnsNew = [[] for dummy in xrange(width)]
for row in self.get_array():
index = 0
for submatrix in row:
for column in submatrix.get_array(False):
columnsNew[index] += column
index += 1
columnsFlat = sum(columnsNew, [])
return Matrix(width, len(columnsNew[0]), columnsFlat, rowBased=False) | [
"def",
"flatten",
"(",
"self",
")",
":",
"blocksize",
"=",
"self",
".",
"get_array",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"get_width",
"(",
")",
"width",
"=",
"self",
".",
"get_width",
"(",
")",
"*",
"blocksize",
"columnsNew",
"=",
"[",
"... | If the current Matrix consists of Blockmatrixes as elementes method
flattens the Matrix into one Matrix only consisting of the 2nd level
elements
[[[1 2] [[3 4] to [[1 2 3 4]
[5 6]] [7 8]]] [5 6 7 8]] | [
"If",
"the",
"current",
"Matrix",
"consists",
"of",
"Blockmatrixes",
"as",
"elementes",
"method",
"flattens",
"the",
"Matrix",
"into",
"one",
"Matrix",
"only",
"consisting",
"of",
"the",
"2nd",
"level",
"elements"
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L455-L477 | train | 36,826 |
T-002/pycast | pycast/common/matrix.py | Matrix.multiply | def multiply(self, multiplicator):
"""Return a new Matrix with a multiple.
:param Number multiplicator: The number to calculate the multiple
:return: The Matrix with the the multiple.
:rtype: Matrix
"""
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) * multiplicator)
return result | python | def multiply(self, multiplicator):
"""Return a new Matrix with a multiple.
:param Number multiplicator: The number to calculate the multiple
:return: The Matrix with the the multiple.
:rtype: Matrix
"""
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) * multiplicator)
return result | [
"def",
"multiply",
"(",
"self",
",",
"multiplicator",
")",
":",
"result",
"=",
"Matrix",
"(",
"self",
".",
"get_width",
"(",
")",
",",
"self",
".",
"get_height",
"(",
")",
")",
"for",
"row",
"in",
"xrange",
"(",
"self",
".",
"get_height",
"(",
")",
... | Return a new Matrix with a multiple.
:param Number multiplicator: The number to calculate the multiple
:return: The Matrix with the the multiple.
:rtype: Matrix | [
"Return",
"a",
"new",
"Matrix",
"with",
"a",
"multiple",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L504-L516 | train | 36,827 |
T-002/pycast | pycast/common/matrix.py | Matrix.transform | def transform(self):
"""Return a new transformed matrix.
:return: Returns a new transformed Matrix
:rtype: Matrix
"""
t_matrix = Matrix(self._rows, self._columns)
for col_i, col in enumerate(self.matrix):
for row_i, entry in enumerate(col):
t_matrix.set_value(row_i, col_i, entry)
return t_matrix | python | def transform(self):
"""Return a new transformed matrix.
:return: Returns a new transformed Matrix
:rtype: Matrix
"""
t_matrix = Matrix(self._rows, self._columns)
for col_i, col in enumerate(self.matrix):
for row_i, entry in enumerate(col):
t_matrix.set_value(row_i, col_i, entry)
return t_matrix | [
"def",
"transform",
"(",
"self",
")",
":",
"t_matrix",
"=",
"Matrix",
"(",
"self",
".",
"_rows",
",",
"self",
".",
"_columns",
")",
"for",
"col_i",
",",
"col",
"in",
"enumerate",
"(",
"self",
".",
"matrix",
")",
":",
"for",
"row_i",
",",
"entry",
"... | Return a new transformed matrix.
:return: Returns a new transformed Matrix
:rtype: Matrix | [
"Return",
"a",
"new",
"transformed",
"matrix",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L518-L528 | train | 36,828 |
T-002/pycast | pycast/common/matrix.py | Vector.initialize_from_matrix | def initialize_from_matrix(cls, matrix, column):
"""Create vector from matrix
:param Matrix matrix: The Matrix, which should be used to create the vector.
:param integer column: The column of the matrix, which should be used
to create the new vector.
:raise: Raises an :py:exc:`IndexError` if the matrix does not have the specified column.
"""
vec = Vector(matrix.get_height())
for row in xrange(matrix.get_height()):
vec.set_value(0, row, matrix.get_value(column, row))
return vec | python | def initialize_from_matrix(cls, matrix, column):
"""Create vector from matrix
:param Matrix matrix: The Matrix, which should be used to create the vector.
:param integer column: The column of the matrix, which should be used
to create the new vector.
:raise: Raises an :py:exc:`IndexError` if the matrix does not have the specified column.
"""
vec = Vector(matrix.get_height())
for row in xrange(matrix.get_height()):
vec.set_value(0, row, matrix.get_value(column, row))
return vec | [
"def",
"initialize_from_matrix",
"(",
"cls",
",",
"matrix",
",",
"column",
")",
":",
"vec",
"=",
"Vector",
"(",
"matrix",
".",
"get_height",
"(",
")",
")",
"for",
"row",
"in",
"xrange",
"(",
"matrix",
".",
"get_height",
"(",
")",
")",
":",
"vec",
"."... | Create vector from matrix
:param Matrix matrix: The Matrix, which should be used to create the vector.
:param integer column: The column of the matrix, which should be used
to create the new vector.
:raise: Raises an :py:exc:`IndexError` if the matrix does not have the specified column. | [
"Create",
"vector",
"from",
"matrix"
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L974-L985 | train | 36,829 |
T-002/pycast | pycast/common/matrix.py | Vector.unify | def unify(self):
"""Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector
"""
length = float(self.norm())
for row in xrange(self.get_height()):
self.set_value(0, row, self.get_value(0, row) / length)
return self | python | def unify(self):
"""Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector
"""
length = float(self.norm())
for row in xrange(self.get_height()):
self.set_value(0, row, self.get_value(0, row) / length)
return self | [
"def",
"unify",
"(",
"self",
")",
":",
"length",
"=",
"float",
"(",
"self",
".",
"norm",
"(",
")",
")",
"for",
"row",
"in",
"xrange",
"(",
"self",
".",
"get_height",
"(",
")",
")",
":",
"self",
".",
"set_value",
"(",
"0",
",",
"row",
",",
"self... | Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector | [
"Unifies",
"the",
"vector",
".",
"The",
"length",
"of",
"the",
"vector",
"will",
"be",
"1",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L995-L1004 | train | 36,830 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.moving_frequency | def moving_frequency(self, data_frame):
"""
This method returns moving frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return diff_mov_freq: frequency
:rtype diff_mov_freq: float
"""
f = []
for i in range(0, (data_frame.td[-1].astype('int') - self.window)):
f.append(sum(data_frame.action_type[(data_frame.td >= i) & (data_frame.td < (i + self.window))] == 1) /
float(self.window))
diff_mov_freq = (np.array(f[1:-1]) - np.array(f[0:-2])) / np.array(f[0:-2])
duration = math.ceil(data_frame.td[-1])
return diff_mov_freq, duration | python | def moving_frequency(self, data_frame):
"""
This method returns moving frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return diff_mov_freq: frequency
:rtype diff_mov_freq: float
"""
f = []
for i in range(0, (data_frame.td[-1].astype('int') - self.window)):
f.append(sum(data_frame.action_type[(data_frame.td >= i) & (data_frame.td < (i + self.window))] == 1) /
float(self.window))
diff_mov_freq = (np.array(f[1:-1]) - np.array(f[0:-2])) / np.array(f[0:-2])
duration = math.ceil(data_frame.td[-1])
return diff_mov_freq, duration | [
"def",
"moving_frequency",
"(",
"self",
",",
"data_frame",
")",
":",
"f",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"(",
"data_frame",
".",
"td",
"[",
"-",
"1",
"]",
".",
"astype",
"(",
"'int'",
")",
"-",
"self",
".",
"window",
"... | This method returns moving frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return diff_mov_freq: frequency
:rtype diff_mov_freq: float | [
"This",
"method",
"returns",
"moving",
"frequency"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L65-L84 | train | 36,831 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.continuous_frequency | def continuous_frequency(self, data_frame):
"""
This method returns continuous frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return cont_freq: frequency
:rtype cont_freq: float
"""
tap_timestamps = data_frame.td[data_frame.action_type==1]
cont_freq = 1.0/(np.array(tap_timestamps[1:-1])-np.array(tap_timestamps[0:-2]))
duration = math.ceil(data_frame.td[-1])
return cont_freq, duration | python | def continuous_frequency(self, data_frame):
"""
This method returns continuous frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return cont_freq: frequency
:rtype cont_freq: float
"""
tap_timestamps = data_frame.td[data_frame.action_type==1]
cont_freq = 1.0/(np.array(tap_timestamps[1:-1])-np.array(tap_timestamps[0:-2]))
duration = math.ceil(data_frame.td[-1])
return cont_freq, duration | [
"def",
"continuous_frequency",
"(",
"self",
",",
"data_frame",
")",
":",
"tap_timestamps",
"=",
"data_frame",
".",
"td",
"[",
"data_frame",
".",
"action_type",
"==",
"1",
"]",
"cont_freq",
"=",
"1.0",
"/",
"(",
"np",
".",
"array",
"(",
"tap_timestamps",
"[... | This method returns continuous frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return cont_freq: frequency
:rtype cont_freq: float | [
"This",
"method",
"returns",
"continuous",
"frequency"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L86-L100 | train | 36,832 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.incoordination_score | def incoordination_score(self, data_frame):
"""
This method calculates the variance of the time interval in msec between taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return is: incoordination score
:rtype is: float
"""
diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values
inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0
duration = math.ceil(data_frame.td[-1])
return inc_s, duration | python | def incoordination_score(self, data_frame):
"""
This method calculates the variance of the time interval in msec between taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return is: incoordination score
:rtype is: float
"""
diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values
inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0
duration = math.ceil(data_frame.td[-1])
return inc_s, duration | [
"def",
"incoordination_score",
"(",
"self",
",",
"data_frame",
")",
":",
"diff",
"=",
"data_frame",
".",
"td",
"[",
"1",
":",
"-",
"1",
"]",
".",
"values",
"-",
"data_frame",
".",
"td",
"[",
"0",
":",
"-",
"2",
"]",
".",
"values",
"inc_s",
"=",
"... | This method calculates the variance of the time interval in msec between taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return is: incoordination score
:rtype is: float | [
"This",
"method",
"calculates",
"the",
"variance",
"of",
"the",
"time",
"interval",
"in",
"msec",
"between",
"taps"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L118-L131 | train | 36,833 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.kinesia_scores | def kinesia_scores(self, data_frame):
"""
This method calculates the number of key taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ks: key taps
:rtype ks: float
:return duration: test duration (seconds)
:rtype duration: float
"""
# tap_timestamps = data_frame.td[data_frame.action_type == 1]
# grouped = tap_timestamps.groupby(pd.TimeGrouper('30u'))
# return np.mean(grouped.size().values)
ks = sum(data_frame.action_type == 1)
duration = math.ceil(data_frame.td[-1])
return ks, duration | python | def kinesia_scores(self, data_frame):
"""
This method calculates the number of key taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ks: key taps
:rtype ks: float
:return duration: test duration (seconds)
:rtype duration: float
"""
# tap_timestamps = data_frame.td[data_frame.action_type == 1]
# grouped = tap_timestamps.groupby(pd.TimeGrouper('30u'))
# return np.mean(grouped.size().values)
ks = sum(data_frame.action_type == 1)
duration = math.ceil(data_frame.td[-1])
return ks, duration | [
"def",
"kinesia_scores",
"(",
"self",
",",
"data_frame",
")",
":",
"# tap_timestamps = data_frame.td[data_frame.action_type == 1]",
"# grouped = tap_timestamps.groupby(pd.TimeGrouper('30u'))",
"# return np.mean(grouped.size().values)",
"ks",
"=",
"sum",
"(",
"data_frame",
".",
"act... | This method calculates the number of key taps
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ks: key taps
:rtype ks: float
:return duration: test duration (seconds)
:rtype duration: float | [
"This",
"method",
"calculates",
"the",
"number",
"of",
"key",
"taps"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L149-L166 | train | 36,834 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.akinesia_times | def akinesia_times(self, data_frame):
"""
This method calculates akinesia times, mean dwell time on each key in milliseconds
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return at: akinesia times
:rtype at: float
:return duration: test duration (seconds)
:rtype duration: float
"""
raise_timestamps = data_frame.td[data_frame.action_type == 1]
down_timestamps = data_frame.td[data_frame.action_type == 0]
if len(raise_timestamps) == len(down_timestamps):
at = np.mean(down_timestamps.values - raise_timestamps.values)
else:
if len(raise_timestamps) > len(down_timestamps):
at = np.mean(down_timestamps.values - raise_timestamps.values[:-(len(raise_timestamps)
- len(down_timestamps))])
else:
at = np.mean(down_timestamps.values[:-(len(down_timestamps)-len(raise_timestamps))]
- raise_timestamps.values)
duration = math.ceil(data_frame.td[-1])
return np.abs(at), duration | python | def akinesia_times(self, data_frame):
"""
This method calculates akinesia times, mean dwell time on each key in milliseconds
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return at: akinesia times
:rtype at: float
:return duration: test duration (seconds)
:rtype duration: float
"""
raise_timestamps = data_frame.td[data_frame.action_type == 1]
down_timestamps = data_frame.td[data_frame.action_type == 0]
if len(raise_timestamps) == len(down_timestamps):
at = np.mean(down_timestamps.values - raise_timestamps.values)
else:
if len(raise_timestamps) > len(down_timestamps):
at = np.mean(down_timestamps.values - raise_timestamps.values[:-(len(raise_timestamps)
- len(down_timestamps))])
else:
at = np.mean(down_timestamps.values[:-(len(down_timestamps)-len(raise_timestamps))]
- raise_timestamps.values)
duration = math.ceil(data_frame.td[-1])
return np.abs(at), duration | [
"def",
"akinesia_times",
"(",
"self",
",",
"data_frame",
")",
":",
"raise_timestamps",
"=",
"data_frame",
".",
"td",
"[",
"data_frame",
".",
"action_type",
"==",
"1",
"]",
"down_timestamps",
"=",
"data_frame",
".",
"td",
"[",
"data_frame",
".",
"action_type",
... | This method calculates akinesia times, mean dwell time on each key in milliseconds
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return at: akinesia times
:rtype at: float
:return duration: test duration (seconds)
:rtype duration: float | [
"This",
"method",
"calculates",
"akinesia",
"times",
"mean",
"dwell",
"time",
"on",
"each",
"key",
"in",
"milliseconds"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L168-L192 | train | 36,835 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.dysmetria_score | def dysmetria_score(self, data_frame):
"""
This method calculates accuracy of target taps in pixels
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ds: dysmetria score in pixels
:rtype ds: float
"""
tap_data = data_frame[data_frame.action_type == 0]
ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2))
duration = math.ceil(data_frame.td[-1])
return ds, duration | python | def dysmetria_score(self, data_frame):
"""
This method calculates accuracy of target taps in pixels
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ds: dysmetria score in pixels
:rtype ds: float
"""
tap_data = data_frame[data_frame.action_type == 0]
ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2))
duration = math.ceil(data_frame.td[-1])
return ds, duration | [
"def",
"dysmetria_score",
"(",
"self",
",",
"data_frame",
")",
":",
"tap_data",
"=",
"data_frame",
"[",
"data_frame",
".",
"action_type",
"==",
"0",
"]",
"ds",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sqrt",
"(",
"(",
"tap_data",
".",
"x",
"-",
"tap_... | This method calculates accuracy of target taps in pixels
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ds: dysmetria score in pixels
:rtype ds: float | [
"This",
"method",
"calculates",
"accuracy",
"of",
"target",
"taps",
"in",
"pixels"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L194-L207 | train | 36,836 |
pdkit/pdkit | pdkit/finger_tapping_processor.py | FingerTappingProcessor.extract_features | def extract_features(self, data_frame, pre=''):
"""
This method extracts all the features available to the Finger Tapping Processor class.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: 'frequency', 'moving_frequency','continuous_frequency','mean_moving_time','incoordination_score', \
'mean_alnt_target_distance','kinesia_scores', 'akinesia_times','dysmetria_score'
:rtype: list
"""
try:
return {pre+'frequency': self.frequency(data_frame)[0],
pre+'mean_moving_time': self.mean_moving_time(data_frame)[0],
pre+'incoordination_score': self.incoordination_score(data_frame)[0],
pre+'mean_alnt_target_distance': self.mean_alnt_target_distance(data_frame)[0],
pre+'kinesia_scores': self.kinesia_scores(data_frame)[0],
pre+'akinesia_times': self.akinesia_times(data_frame)[0],
pre+'dysmetria_score': self.dysmetria_score(data_frame)[0]}
except:
logging.error("Error on FingerTappingProcessor process, extract features: %s", sys.exc_info()[0]) | python | def extract_features(self, data_frame, pre=''):
"""
This method extracts all the features available to the Finger Tapping Processor class.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: 'frequency', 'moving_frequency','continuous_frequency','mean_moving_time','incoordination_score', \
'mean_alnt_target_distance','kinesia_scores', 'akinesia_times','dysmetria_score'
:rtype: list
"""
try:
return {pre+'frequency': self.frequency(data_frame)[0],
pre+'mean_moving_time': self.mean_moving_time(data_frame)[0],
pre+'incoordination_score': self.incoordination_score(data_frame)[0],
pre+'mean_alnt_target_distance': self.mean_alnt_target_distance(data_frame)[0],
pre+'kinesia_scores': self.kinesia_scores(data_frame)[0],
pre+'akinesia_times': self.akinesia_times(data_frame)[0],
pre+'dysmetria_score': self.dysmetria_score(data_frame)[0]}
except:
logging.error("Error on FingerTappingProcessor process, extract features: %s", sys.exc_info()[0]) | [
"def",
"extract_features",
"(",
"self",
",",
"data_frame",
",",
"pre",
"=",
"''",
")",
":",
"try",
":",
"return",
"{",
"pre",
"+",
"'frequency'",
":",
"self",
".",
"frequency",
"(",
"data_frame",
")",
"[",
"0",
"]",
",",
"pre",
"+",
"'mean_moving_time'... | This method extracts all the features available to the Finger Tapping Processor class.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: 'frequency', 'moving_frequency','continuous_frequency','mean_moving_time','incoordination_score', \
'mean_alnt_target_distance','kinesia_scores', 'akinesia_times','dysmetria_score'
:rtype: list | [
"This",
"method",
"extracts",
"all",
"the",
"features",
"available",
"to",
"the",
"Finger",
"Tapping",
"Processor",
"class",
"."
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L209-L229 | train | 36,837 |
pdkit/pdkit | pdkit/clinical_updrs.py | Clinical_UPDRS.__get_features_for_observation | def __get_features_for_observation(self, data_frame=None, observation='LA-LL',
skip_id=None, last_column_is_id=False):
"""
Extract the features for a given observation from a data frame
:param data_frame: data frame to get features from
:type data_frame: pandas.DataFrame
:param observation: observation name
:type observation: string
:param skip_id: skip any test with a given id (optional)
:type skip_id: int
:param last_column_is_id: skip the last column of the data frame (useful when id is last column - optional)
:type last_column_is_id: bool
:return features: the features
:rtype features: np.array
"""
try:
features = np.array([])
if data_frame is None:
data_frame = self.data_frame
for index, row in data_frame.iterrows():
if not skip_id == row['id']:
features_row = np.nan_to_num(row[row.keys().str.contains(observation)].values)
features_row = np.append(features_row, row['id'])
features = np.vstack([features, features_row]) if features.size else features_row
# not the same when getting a single point
if last_column_is_id:
if np.ndim(features) > 1:
to_return = features[:,:-1]
else:
to_return = features[:-1]
else:
to_return = features
return to_return, data_frame['id'].values
except:
logging.error(" observation not found in data frame") | python | def __get_features_for_observation(self, data_frame=None, observation='LA-LL',
skip_id=None, last_column_is_id=False):
"""
Extract the features for a given observation from a data frame
:param data_frame: data frame to get features from
:type data_frame: pandas.DataFrame
:param observation: observation name
:type observation: string
:param skip_id: skip any test with a given id (optional)
:type skip_id: int
:param last_column_is_id: skip the last column of the data frame (useful when id is last column - optional)
:type last_column_is_id: bool
:return features: the features
:rtype features: np.array
"""
try:
features = np.array([])
if data_frame is None:
data_frame = self.data_frame
for index, row in data_frame.iterrows():
if not skip_id == row['id']:
features_row = np.nan_to_num(row[row.keys().str.contains(observation)].values)
features_row = np.append(features_row, row['id'])
features = np.vstack([features, features_row]) if features.size else features_row
# not the same when getting a single point
if last_column_is_id:
if np.ndim(features) > 1:
to_return = features[:,:-1]
else:
to_return = features[:-1]
else:
to_return = features
return to_return, data_frame['id'].values
except:
logging.error(" observation not found in data frame") | [
"def",
"__get_features_for_observation",
"(",
"self",
",",
"data_frame",
"=",
"None",
",",
"observation",
"=",
"'LA-LL'",
",",
"skip_id",
"=",
"None",
",",
"last_column_is_id",
"=",
"False",
")",
":",
"try",
":",
"features",
"=",
"np",
".",
"array",
"(",
"... | Extract the features for a given observation from a data frame
:param data_frame: data frame to get features from
:type data_frame: pandas.DataFrame
:param observation: observation name
:type observation: string
:param skip_id: skip any test with a given id (optional)
:type skip_id: int
:param last_column_is_id: skip the last column of the data frame (useful when id is last column - optional)
:type last_column_is_id: bool
:return features: the features
:rtype features: np.array | [
"Extract",
"the",
"features",
"for",
"a",
"given",
"observation",
"from",
"a",
"data",
"frame"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/clinical_updrs.py#L140-L179 | train | 36,838 |
pdkit/pdkit | pdkit/clinical_updrs.py | Clinical_UPDRS.predict | def predict(self, measurement, output_format='array'):
"""
Method to predict the class labels for the provided data
:param measurement: the point to classify
:type measurement: pandas.DataFrame
:param output_format: the format to return the scores ('array' or 'str')
:type output_format: string
:return prediction: the prediction for a given test/point
:rtype prediction: np.array
"""
scores = np.array([])
for obs in self.observations:
knn = self.__get_knn_by_observation(obs)
p, ids = self.__get_features_for_observation(data_frame=measurement, observation=obs,
skip_id=3497, last_column_is_id=True)
score = knn.predict(pd.DataFrame(p).T)
scores = np.append(scores, score, axis=0)
if output_format == 'array':
return scores.astype(int)
else:
return np.array_str(scores.astype(int)) | python | def predict(self, measurement, output_format='array'):
"""
Method to predict the class labels for the provided data
:param measurement: the point to classify
:type measurement: pandas.DataFrame
:param output_format: the format to return the scores ('array' or 'str')
:type output_format: string
:return prediction: the prediction for a given test/point
:rtype prediction: np.array
"""
scores = np.array([])
for obs in self.observations:
knn = self.__get_knn_by_observation(obs)
p, ids = self.__get_features_for_observation(data_frame=measurement, observation=obs,
skip_id=3497, last_column_is_id=True)
score = knn.predict(pd.DataFrame(p).T)
scores = np.append(scores, score, axis=0)
if output_format == 'array':
return scores.astype(int)
else:
return np.array_str(scores.astype(int)) | [
"def",
"predict",
"(",
"self",
",",
"measurement",
",",
"output_format",
"=",
"'array'",
")",
":",
"scores",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"for",
"obs",
"in",
"self",
".",
"observations",
":",
"knn",
"=",
"self",
".",
"__get_knn_by_obser... | Method to predict the class labels for the provided data
:param measurement: the point to classify
:type measurement: pandas.DataFrame
:param output_format: the format to return the scores ('array' or 'str')
:type output_format: string
:return prediction: the prediction for a given test/point
:rtype prediction: np.array | [
"Method",
"to",
"predict",
"the",
"class",
"labels",
"for",
"the",
"provided",
"data"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/clinical_updrs.py#L186-L209 | train | 36,839 |
twisted/txaws | txaws/server/schema.py | _namify_arguments | def _namify_arguments(mapping):
"""
Ensure that a mapping of names to parameters has the parameters set to the
correct name.
"""
result = []
for name, parameter in mapping.iteritems():
parameter.name = name
result.append(parameter)
return result | python | def _namify_arguments(mapping):
"""
Ensure that a mapping of names to parameters has the parameters set to the
correct name.
"""
result = []
for name, parameter in mapping.iteritems():
parameter.name = name
result.append(parameter)
return result | [
"def",
"_namify_arguments",
"(",
"mapping",
")",
":",
"result",
"=",
"[",
"]",
"for",
"name",
",",
"parameter",
"in",
"mapping",
".",
"iteritems",
"(",
")",
":",
"parameter",
".",
"name",
"=",
"name",
"result",
".",
"append",
"(",
"parameter",
")",
"re... | Ensure that a mapping of names to parameters has the parameters set to the
correct name. | [
"Ensure",
"that",
"a",
"mapping",
"of",
"names",
"to",
"parameters",
"has",
"the",
"parameters",
"set",
"to",
"the",
"correct",
"name",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L517-L526 | train | 36,840 |
twisted/txaws | txaws/server/schema.py | Parameter.coerce | def coerce(self, value):
"""Coerce a single value according to this parameter's settings.
@param value: A L{str}, or L{None}. If L{None} is passed - meaning no
value is avalable at all, not even the empty string - and this
parameter is optional, L{self.default} will be returned.
"""
if value is None:
if self.optional:
return self.default
else:
value = ""
if value == "":
if not self.allow_none:
raise MissingParameterError(self.name, kind=self.kind)
return self.default
try:
self._check_range(value)
parsed = self.parse(value)
if self.validator and not self.validator(parsed):
raise ValueError(value)
return parsed
except ValueError:
try:
value = value.decode("utf-8")
message = "Invalid %s value %s" % (self.kind, value)
except UnicodeDecodeError:
message = "Invalid %s value" % self.kind
raise InvalidParameterValueError(message) | python | def coerce(self, value):
"""Coerce a single value according to this parameter's settings.
@param value: A L{str}, or L{None}. If L{None} is passed - meaning no
value is avalable at all, not even the empty string - and this
parameter is optional, L{self.default} will be returned.
"""
if value is None:
if self.optional:
return self.default
else:
value = ""
if value == "":
if not self.allow_none:
raise MissingParameterError(self.name, kind=self.kind)
return self.default
try:
self._check_range(value)
parsed = self.parse(value)
if self.validator and not self.validator(parsed):
raise ValueError(value)
return parsed
except ValueError:
try:
value = value.decode("utf-8")
message = "Invalid %s value %s" % (self.kind, value)
except UnicodeDecodeError:
message = "Invalid %s value" % self.kind
raise InvalidParameterValueError(message) | [
"def",
"coerce",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"if",
"self",
".",
"optional",
":",
"return",
"self",
".",
"default",
"else",
":",
"value",
"=",
"\"\"",
"if",
"value",
"==",
"\"\"",
":",
"if",
"not",
"self",
... | Coerce a single value according to this parameter's settings.
@param value: A L{str}, or L{None}. If L{None} is passed - meaning no
value is avalable at all, not even the empty string - and this
parameter is optional, L{self.default} will be returned. | [
"Coerce",
"a",
"single",
"value",
"according",
"to",
"this",
"parameter",
"s",
"settings",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L107-L135 | train | 36,841 |
twisted/txaws | txaws/server/schema.py | Structure.parse | def parse(self, value):
"""
Convert a dictionary of raw values to a dictionary of processed values.
"""
result = {}
rest = {}
for k, v in value.iteritems():
if k in self.fields:
if (isinstance(v, dict)
and not self.fields[k].supports_multiple):
if len(v) == 1:
# We support "foo.1" as "foo" as long as there is only
# one "foo.#" parameter provided.... -_-
v = v.values()[0]
else:
raise InvalidParameterCombinationError(k)
result[k] = self.fields[k].coerce(v)
else:
rest[k] = v
for k, v in self.fields.iteritems():
if k not in result:
result[k] = v.coerce(None)
if rest:
raise UnknownParametersError(result, rest)
return result | python | def parse(self, value):
"""
Convert a dictionary of raw values to a dictionary of processed values.
"""
result = {}
rest = {}
for k, v in value.iteritems():
if k in self.fields:
if (isinstance(v, dict)
and not self.fields[k].supports_multiple):
if len(v) == 1:
# We support "foo.1" as "foo" as long as there is only
# one "foo.#" parameter provided.... -_-
v = v.values()[0]
else:
raise InvalidParameterCombinationError(k)
result[k] = self.fields[k].coerce(v)
else:
rest[k] = v
for k, v in self.fields.iteritems():
if k not in result:
result[k] = v.coerce(None)
if rest:
raise UnknownParametersError(result, rest)
return result | [
"def",
"parse",
"(",
"self",
",",
"value",
")",
":",
"result",
"=",
"{",
"}",
"rest",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"value",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"in",
"self",
".",
"fields",
":",
"if",
"(",
"isinstance",
"... | Convert a dictionary of raw values to a dictionary of processed values. | [
"Convert",
"a",
"dictionary",
"of",
"raw",
"values",
"to",
"a",
"dictionary",
"of",
"processed",
"values",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L428-L452 | train | 36,842 |
twisted/txaws | txaws/server/schema.py | Structure.format | def format(self, value):
"""
Convert a dictionary of processed values to a dictionary of raw values.
"""
if not isinstance(value, Arguments):
value = value.iteritems()
return dict((k, self.fields[k].format(v)) for k, v in value) | python | def format(self, value):
"""
Convert a dictionary of processed values to a dictionary of raw values.
"""
if not isinstance(value, Arguments):
value = value.iteritems()
return dict((k, self.fields[k].format(v)) for k, v in value) | [
"def",
"format",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"Arguments",
")",
":",
"value",
"=",
"value",
".",
"iteritems",
"(",
")",
"return",
"dict",
"(",
"(",
"k",
",",
"self",
".",
"fields",
"[",
"k",
"... | Convert a dictionary of processed values to a dictionary of raw values. | [
"Convert",
"a",
"dictionary",
"of",
"processed",
"values",
"to",
"a",
"dictionary",
"of",
"raw",
"values",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L454-L460 | train | 36,843 |
twisted/txaws | txaws/server/schema.py | Schema.extend | def extend(self, *schema_items, **kwargs):
"""
Add any number of schema items to a new schema.
Takes the same arguments as the constructor, and returns a new
L{Schema} instance.
If parameters, result, or errors is specified, they will be merged with
the existing parameters, result, or errors.
"""
new_kwargs = {
'name': self.name,
'doc': self.doc,
'parameters': self._parameters[:],
'result': self.result.copy() if self.result else {},
'errors': self.errors.copy() if self.errors else set()}
if 'parameters' in kwargs:
new_params = kwargs.pop('parameters')
new_kwargs['parameters'].extend(new_params)
new_kwargs['result'].update(kwargs.pop('result', {}))
new_kwargs['errors'].update(kwargs.pop('errors', set()))
new_kwargs.update(kwargs)
if schema_items:
parameters = self._convert_old_schema(schema_items)
new_kwargs['parameters'].extend(parameters)
return Schema(**new_kwargs) | python | def extend(self, *schema_items, **kwargs):
"""
Add any number of schema items to a new schema.
Takes the same arguments as the constructor, and returns a new
L{Schema} instance.
If parameters, result, or errors is specified, they will be merged with
the existing parameters, result, or errors.
"""
new_kwargs = {
'name': self.name,
'doc': self.doc,
'parameters': self._parameters[:],
'result': self.result.copy() if self.result else {},
'errors': self.errors.copy() if self.errors else set()}
if 'parameters' in kwargs:
new_params = kwargs.pop('parameters')
new_kwargs['parameters'].extend(new_params)
new_kwargs['result'].update(kwargs.pop('result', {}))
new_kwargs['errors'].update(kwargs.pop('errors', set()))
new_kwargs.update(kwargs)
if schema_items:
parameters = self._convert_old_schema(schema_items)
new_kwargs['parameters'].extend(parameters)
return Schema(**new_kwargs) | [
"def",
"extend",
"(",
"self",
",",
"*",
"schema_items",
",",
"*",
"*",
"kwargs",
")",
":",
"new_kwargs",
"=",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'doc'",
":",
"self",
".",
"doc",
",",
"'parameters'",
":",
"self",
".",
"_parameters",
"[",
... | Add any number of schema items to a new schema.
Takes the same arguments as the constructor, and returns a new
L{Schema} instance.
If parameters, result, or errors is specified, they will be merged with
the existing parameters, result, or errors. | [
"Add",
"any",
"number",
"of",
"schema",
"items",
"to",
"a",
"new",
"schema",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L706-L732 | train | 36,844 |
twisted/txaws | txaws/server/schema.py | Schema._convert_old_schema | def _convert_old_schema(self, parameters):
"""
Convert an ugly old schema, using dotted names, to the hot new schema,
using List and Structure.
The old schema assumes that every other dot implies an array. So a list
of two parameters,
[Integer("foo.bar.baz.quux"), Integer("foo.bar.shimmy")]
becomes::
[List(
"foo",
item=Structure(
fields={"baz": List(item=Integer()),
"shimmy": Integer()}))]
By design, the old schema syntax ignored the names "bar" and "quux".
"""
# 'merged' here is an associative list that maps parameter names to
# Parameter instances, OR sub-associative lists which represent nested
# lists and structures.
# e.g.,
# [Integer("foo")]
# becomes
# [("foo", Integer("foo"))]
# and
# [Integer("foo.bar")]
# (which represents a list of integers called "foo" with a meaningless
# index name of "bar") becomes
# [("foo", [("bar", Integer("foo.bar"))])].
merged = []
for parameter in parameters:
segments = parameter.name.split('.')
_merge_associative_list(merged, segments, parameter)
result = [self._inner_convert_old_schema(node, 1) for node in merged]
return result | python | def _convert_old_schema(self, parameters):
"""
Convert an ugly old schema, using dotted names, to the hot new schema,
using List and Structure.
The old schema assumes that every other dot implies an array. So a list
of two parameters,
[Integer("foo.bar.baz.quux"), Integer("foo.bar.shimmy")]
becomes::
[List(
"foo",
item=Structure(
fields={"baz": List(item=Integer()),
"shimmy": Integer()}))]
By design, the old schema syntax ignored the names "bar" and "quux".
"""
# 'merged' here is an associative list that maps parameter names to
# Parameter instances, OR sub-associative lists which represent nested
# lists and structures.
# e.g.,
# [Integer("foo")]
# becomes
# [("foo", Integer("foo"))]
# and
# [Integer("foo.bar")]
# (which represents a list of integers called "foo" with a meaningless
# index name of "bar") becomes
# [("foo", [("bar", Integer("foo.bar"))])].
merged = []
for parameter in parameters:
segments = parameter.name.split('.')
_merge_associative_list(merged, segments, parameter)
result = [self._inner_convert_old_schema(node, 1) for node in merged]
return result | [
"def",
"_convert_old_schema",
"(",
"self",
",",
"parameters",
")",
":",
"# 'merged' here is an associative list that maps parameter names to",
"# Parameter instances, OR sub-associative lists which represent nested",
"# lists and structures.",
"# e.g.,",
"# [Integer(\"foo\")]",
"# becom... | Convert an ugly old schema, using dotted names, to the hot new schema,
using List and Structure.
The old schema assumes that every other dot implies an array. So a list
of two parameters,
[Integer("foo.bar.baz.quux"), Integer("foo.bar.shimmy")]
becomes::
[List(
"foo",
item=Structure(
fields={"baz": List(item=Integer()),
"shimmy": Integer()}))]
By design, the old schema syntax ignored the names "bar" and "quux". | [
"Convert",
"an",
"ugly",
"old",
"schema",
"using",
"dotted",
"names",
"to",
"the",
"hot",
"new",
"schema",
"using",
"List",
"and",
"Structure",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L734-L771 | train | 36,845 |
Duke-GCB/DukeDSClient | ddsc/core/util.py | ProgressPrinter.finished | def finished(self):
"""
Must be called to print final progress label.
"""
self.progress_bar.set_state(ProgressBar.STATE_DONE)
self.progress_bar.show() | python | def finished(self):
"""
Must be called to print final progress label.
"""
self.progress_bar.set_state(ProgressBar.STATE_DONE)
self.progress_bar.show() | [
"def",
"finished",
"(",
"self",
")",
":",
"self",
".",
"progress_bar",
".",
"set_state",
"(",
"ProgressBar",
".",
"STATE_DONE",
")",
"self",
".",
"progress_bar",
".",
"show",
"(",
")"
] | Must be called to print final progress label. | [
"Must",
"be",
"called",
"to",
"print",
"final",
"progress",
"label",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/util.py#L76-L81 | train | 36,846 |
Duke-GCB/DukeDSClient | ddsc/core/util.py | ProgressPrinter.start_waiting | def start_waiting(self):
"""
Show waiting progress bar until done_waiting is called.
Only has an effect if we are in waiting state.
"""
if not self.waiting:
self.waiting = True
wait_msg = "Waiting for project to become ready for {}".format(self.msg_verb)
self.progress_bar.show_waiting(wait_msg) | python | def start_waiting(self):
"""
Show waiting progress bar until done_waiting is called.
Only has an effect if we are in waiting state.
"""
if not self.waiting:
self.waiting = True
wait_msg = "Waiting for project to become ready for {}".format(self.msg_verb)
self.progress_bar.show_waiting(wait_msg) | [
"def",
"start_waiting",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"waiting",
":",
"self",
".",
"waiting",
"=",
"True",
"wait_msg",
"=",
"\"Waiting for project to become ready for {}\"",
".",
"format",
"(",
"self",
".",
"msg_verb",
")",
"self",
".",
"p... | Show waiting progress bar until done_waiting is called.
Only has an effect if we are in waiting state. | [
"Show",
"waiting",
"progress",
"bar",
"until",
"done_waiting",
"is",
"called",
".",
"Only",
"has",
"an",
"effect",
"if",
"we",
"are",
"in",
"waiting",
"state",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/util.py#L90-L98 | train | 36,847 |
twisted/txaws | txaws/s3/client.py | s3_url_context | def s3_url_context(service_endpoint, bucket=None, object_name=None):
"""
Create a URL based on the given service endpoint and suitable for
the given bucket or object.
@param service_endpoint: The service endpoint on which to base the
resulting URL.
@type service_endpoint: L{AWSServiceEndpoint}
@param bucket: If given, the name of a bucket to reference.
@type bucket: L{unicode}
@param object_name: If given, the name of an object or object
subresource to reference.
@type object_name: L{unicode}
"""
# Define our own query parser which can handle the consequences of
# `?acl` and such (subresources). At its best, parse_qsl doesn't
# let us differentiate between these and empty values (such as
# `?acl=`).
def p(s):
results = []
args = s.split(u"&")
for a in args:
pieces = a.split(u"=")
if len(pieces) == 1:
results.append((unquote(pieces[0]),))
elif len(pieces) == 2:
results.append(tuple(map(unquote, pieces)))
else:
raise Exception("oh no")
return results
query = []
path = []
if bucket is None:
path.append(u"")
else:
if isinstance(bucket, bytes):
bucket = bucket.decode("utf-8")
path.append(bucket)
if object_name is None:
path.append(u"")
else:
if isinstance(object_name, bytes):
object_name = object_name.decode("utf-8")
if u"?" in object_name:
object_name, query = object_name.split(u"?", 1)
query = p(query)
object_name_components = object_name.split(u"/")
if object_name_components[0] == u"":
object_name_components.pop(0)
if object_name_components:
path.extend(object_name_components)
else:
path.append(u"")
return _S3URLContext(
scheme=service_endpoint.scheme.decode("utf-8"),
host=service_endpoint.get_host().decode("utf-8"),
port=service_endpoint.port,
path=path,
query=query,
) | python | def s3_url_context(service_endpoint, bucket=None, object_name=None):
"""
Create a URL based on the given service endpoint and suitable for
the given bucket or object.
@param service_endpoint: The service endpoint on which to base the
resulting URL.
@type service_endpoint: L{AWSServiceEndpoint}
@param bucket: If given, the name of a bucket to reference.
@type bucket: L{unicode}
@param object_name: If given, the name of an object or object
subresource to reference.
@type object_name: L{unicode}
"""
# Define our own query parser which can handle the consequences of
# `?acl` and such (subresources). At its best, parse_qsl doesn't
# let us differentiate between these and empty values (such as
# `?acl=`).
def p(s):
results = []
args = s.split(u"&")
for a in args:
pieces = a.split(u"=")
if len(pieces) == 1:
results.append((unquote(pieces[0]),))
elif len(pieces) == 2:
results.append(tuple(map(unquote, pieces)))
else:
raise Exception("oh no")
return results
query = []
path = []
if bucket is None:
path.append(u"")
else:
if isinstance(bucket, bytes):
bucket = bucket.decode("utf-8")
path.append(bucket)
if object_name is None:
path.append(u"")
else:
if isinstance(object_name, bytes):
object_name = object_name.decode("utf-8")
if u"?" in object_name:
object_name, query = object_name.split(u"?", 1)
query = p(query)
object_name_components = object_name.split(u"/")
if object_name_components[0] == u"":
object_name_components.pop(0)
if object_name_components:
path.extend(object_name_components)
else:
path.append(u"")
return _S3URLContext(
scheme=service_endpoint.scheme.decode("utf-8"),
host=service_endpoint.get_host().decode("utf-8"),
port=service_endpoint.port,
path=path,
query=query,
) | [
"def",
"s3_url_context",
"(",
"service_endpoint",
",",
"bucket",
"=",
"None",
",",
"object_name",
"=",
"None",
")",
":",
"# Define our own query parser which can handle the consequences of",
"# `?acl` and such (subresources). At its best, parse_qsl doesn't",
"# let us differentiate ... | Create a URL based on the given service endpoint and suitable for
the given bucket or object.
@param service_endpoint: The service endpoint on which to base the
resulting URL.
@type service_endpoint: L{AWSServiceEndpoint}
@param bucket: If given, the name of a bucket to reference.
@type bucket: L{unicode}
@param object_name: If given, the name of an object or object
subresource to reference.
@type object_name: L{unicode} | [
"Create",
"a",
"URL",
"based",
"on",
"the",
"given",
"service",
"endpoint",
"and",
"suitable",
"for",
"the",
"given",
"bucket",
"or",
"object",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L824-L887 | train | 36,848 |
twisted/txaws | txaws/s3/client.py | S3Client.list_buckets | def list_buckets(self):
"""
List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(),
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_list_buckets)
return d | python | def list_buckets(self):
"""
List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(),
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_list_buckets)
return d | [
"def",
"list_buckets",
"(",
"self",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
")",
",",
")",
"query",
"=",
"self",
".",
"_query_factory",
"(",
"details",
... | List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request. | [
"List",
"all",
"buckets",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L140-L154 | train | 36,849 |
twisted/txaws | txaws/s3/client.py | S3Client._parse_list_buckets | def _parse_list_buckets(self, (response, xml_bytes)):
"""
Parse XML bucket list response.
"""
root = XML(xml_bytes)
buckets = []
for bucket_data in root.find("Buckets"):
name = bucket_data.findtext("Name")
date_text = bucket_data.findtext("CreationDate")
date_time = parseTime(date_text)
bucket = Bucket(name, date_time)
buckets.append(bucket)
return buckets | python | def _parse_list_buckets(self, (response, xml_bytes)):
"""
Parse XML bucket list response.
"""
root = XML(xml_bytes)
buckets = []
for bucket_data in root.find("Buckets"):
name = bucket_data.findtext("Name")
date_text = bucket_data.findtext("CreationDate")
date_time = parseTime(date_text)
bucket = Bucket(name, date_time)
buckets.append(bucket)
return buckets | [
"def",
"_parse_list_buckets",
"(",
"self",
",",
"(",
"response",
",",
"xml_bytes",
")",
")",
":",
"root",
"=",
"XML",
"(",
"xml_bytes",
")",
"buckets",
"=",
"[",
"]",
"for",
"bucket_data",
"in",
"root",
".",
"find",
"(",
"\"Buckets\"",
")",
":",
"name"... | Parse XML bucket list response. | [
"Parse",
"XML",
"bucket",
"list",
"response",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L156-L168 | train | 36,850 |
twisted/txaws | txaws/s3/client.py | S3Client.get_bucket | def get_bucket(self, bucket, marker=None, max_keys=None, prefix=None):
"""
Get a list of all the objects in a bucket.
@param bucket: The name of the bucket from which to retrieve objects.
@type bucket: L{unicode}
@param marker: If given, indicate a position in the overall
results where the results of this call should begin. The
first result is the first object that sorts greater than
this marker.
@type marker: L{bytes} or L{NoneType}
@param max_keys: If given, the maximum number of objects to
return.
@type max_keys: L{int} or L{NoneType}
@param prefix: If given, indicate that only objects with keys
beginning with this value should be returned.
@type prefix: L{bytes} or L{NoneType}
@return: A L{Deferred} that fires with a L{BucketListing}
describing the result.
@see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}
"""
args = []
if marker is not None:
args.append(("marker", marker))
if max_keys is not None:
args.append(("max-keys", "%d" % (max_keys,)))
if prefix is not None:
args.append(("prefix", prefix))
if args:
object_name = "?" + urlencode(args)
else:
object_name = None
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_bucket)
return d | python | def get_bucket(self, bucket, marker=None, max_keys=None, prefix=None):
"""
Get a list of all the objects in a bucket.
@param bucket: The name of the bucket from which to retrieve objects.
@type bucket: L{unicode}
@param marker: If given, indicate a position in the overall
results where the results of this call should begin. The
first result is the first object that sorts greater than
this marker.
@type marker: L{bytes} or L{NoneType}
@param max_keys: If given, the maximum number of objects to
return.
@type max_keys: L{int} or L{NoneType}
@param prefix: If given, indicate that only objects with keys
beginning with this value should be returned.
@type prefix: L{bytes} or L{NoneType}
@return: A L{Deferred} that fires with a L{BucketListing}
describing the result.
@see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}
"""
args = []
if marker is not None:
args.append(("marker", marker))
if max_keys is not None:
args.append(("max-keys", "%d" % (max_keys,)))
if prefix is not None:
args.append(("prefix", prefix))
if args:
object_name = "?" + urlencode(args)
else:
object_name = None
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_bucket)
return d | [
"def",
"get_bucket",
"(",
"self",
",",
"bucket",
",",
"marker",
"=",
"None",
",",
"max_keys",
"=",
"None",
",",
"prefix",
"=",
"None",
")",
":",
"args",
"=",
"[",
"]",
"if",
"marker",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"(",
"\"... | Get a list of all the objects in a bucket.
@param bucket: The name of the bucket from which to retrieve objects.
@type bucket: L{unicode}
@param marker: If given, indicate a position in the overall
results where the results of this call should begin. The
first result is the first object that sorts greater than
this marker.
@type marker: L{bytes} or L{NoneType}
@param max_keys: If given, the maximum number of objects to
return.
@type max_keys: L{int} or L{NoneType}
@param prefix: If given, indicate that only objects with keys
beginning with this value should be returned.
@type prefix: L{bytes} or L{NoneType}
@return: A L{Deferred} that fires with a L{BucketListing}
describing the result.
@see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html} | [
"Get",
"a",
"list",
"of",
"all",
"the",
"objects",
"in",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L194-L237 | train | 36,851 |
twisted/txaws | txaws/s3/client.py | S3Client.get_bucket_lifecycle | def get_bucket_lifecycle(self, bucket):
"""
Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_lifecycle_config)
return d | python | def get_bucket_lifecycle(self, bucket):
"""
Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_lifecycle_config)
return d | [
"def",
"get_bucket_lifecycle",
"(",
"self",
",",
"bucket",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
"=",
"\"?l... | Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration. | [
"Get",
"the",
"lifecycle",
"configuration",
"of",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L289-L303 | train | 36,852 |
twisted/txaws | txaws/s3/client.py | S3Client.get_bucket_website_config | def get_bucket_website_config(self, bucket):
"""
Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name='?website'),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_website_config)
return d | python | def get_bucket_website_config(self, bucket):
"""
Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name='?website'),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_website_config)
return d | [
"def",
"get_bucket_website_config",
"(",
"self",
",",
"bucket",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
"=",
... | Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration. | [
"Get",
"the",
"website",
"configuration",
"of",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L320-L334 | train | 36,853 |
twisted/txaws | txaws/s3/client.py | S3Client.get_bucket_notification_config | def get_bucket_notification_config(self, bucket):
"""
Get the notification configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will request the bucket's notification
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?notification"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_notification_config)
return d | python | def get_bucket_notification_config(self, bucket):
"""
Get the notification configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will request the bucket's notification
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?notification"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_notification_config)
return d | [
"def",
"get_bucket_notification_config",
"(",
"self",
",",
"bucket",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
"=... | Get the notification configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will request the bucket's notification
configuration. | [
"Get",
"the",
"notification",
"configuration",
"of",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L344-L358 | train | 36,854 |
twisted/txaws | txaws/s3/client.py | S3Client.get_bucket_versioning_config | def get_bucket_versioning_config(self, bucket):
"""
Get the versioning configuration of a bucket.
@param bucket: The name of the bucket. @return: A C{Deferred} that
will request the bucket's versioning configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?versioning"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_versioning_config)
return d | python | def get_bucket_versioning_config(self, bucket):
"""
Get the versioning configuration of a bucket.
@param bucket: The name of the bucket. @return: A C{Deferred} that
will request the bucket's versioning configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?versioning"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_versioning_config)
return d | [
"def",
"get_bucket_versioning_config",
"(",
"self",
",",
"bucket",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
"=",... | Get the versioning configuration of a bucket.
@param bucket: The name of the bucket. @return: A C{Deferred} that
will request the bucket's versioning configuration. | [
"Get",
"the",
"versioning",
"configuration",
"of",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L368-L381 | train | 36,855 |
twisted/txaws | txaws/s3/client.py | S3Client.get_bucket_acl | def get_bucket_acl(self, bucket):
"""
Get the access control policy for a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?acl"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d | python | def get_bucket_acl(self, bucket):
"""
Get the access control policy for a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?acl"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d | [
"def",
"get_bucket_acl",
"(",
"self",
",",
"bucket",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
"=",
"\"?acl\"",... | Get the access control policy for a bucket. | [
"Get",
"the",
"access",
"control",
"policy",
"for",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L391-L401 | train | 36,856 |
twisted/txaws | txaws/s3/client.py | S3Client.put_object | def put_object(self, bucket, object_name, data=None, content_type=None,
metadata={}, amz_headers={}, body_producer=None):
"""
Put an object in a bucket.
An existing object with the same name will be replaced.
@param bucket: The name of the bucket.
@param object_name: The name of the object.
@type object_name: L{unicode}
@param data: The data to write.
@param content_type: The type of data being written.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=object_name),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
body=data,
body_producer=body_producer,
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d | python | def put_object(self, bucket, object_name, data=None, content_type=None,
metadata={}, amz_headers={}, body_producer=None):
"""
Put an object in a bucket.
An existing object with the same name will be replaced.
@param bucket: The name of the bucket.
@param object_name: The name of the object.
@type object_name: L{unicode}
@param data: The data to write.
@param content_type: The type of data being written.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=object_name),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
body=data,
body_producer=body_producer,
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d | [
"def",
"put_object",
"(",
"self",
",",
"bucket",
",",
"object_name",
",",
"data",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"metadata",
"=",
"{",
"}",
",",
"amz_headers",
"=",
"{",
"}",
",",
"body_producer",
"=",
"None",
")",
":",
"details",
... | Put an object in a bucket.
An existing object with the same name will be replaced.
@param bucket: The name of the bucket.
@param object_name: The name of the object.
@type object_name: L{unicode}
@param data: The data to write.
@param content_type: The type of data being written.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request. | [
"Put",
"an",
"object",
"in",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L424-L451 | train | 36,857 |
twisted/txaws | txaws/s3/client.py | S3Client.copy_object | def copy_object(self, source_bucket, source_object_name, dest_bucket=None,
dest_object_name=None, metadata={}, amz_headers={}):
"""
Copy an object stored in S3 from a source bucket to a destination
bucket.
@param source_bucket: The S3 bucket to copy the object from.
@param source_object_name: The name of the object to copy.
@param dest_bucket: Optionally, the S3 bucket to copy the object to.
Defaults to C{source_bucket}.
@param dest_object_name: Optionally, the name of the new object.
Defaults to C{source_object_name}.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request.
"""
dest_bucket = dest_bucket or source_bucket
dest_object_name = dest_object_name or source_object_name
amz_headers["copy-source"] = "/%s/%s" % (source_bucket,
source_object_name)
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=dest_bucket, object_name=dest_object_name,
),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
return d | python | def copy_object(self, source_bucket, source_object_name, dest_bucket=None,
dest_object_name=None, metadata={}, amz_headers={}):
"""
Copy an object stored in S3 from a source bucket to a destination
bucket.
@param source_bucket: The S3 bucket to copy the object from.
@param source_object_name: The name of the object to copy.
@param dest_bucket: Optionally, the S3 bucket to copy the object to.
Defaults to C{source_bucket}.
@param dest_object_name: Optionally, the name of the new object.
Defaults to C{source_object_name}.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request.
"""
dest_bucket = dest_bucket or source_bucket
dest_object_name = dest_object_name or source_object_name
amz_headers["copy-source"] = "/%s/%s" % (source_bucket,
source_object_name)
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=dest_bucket, object_name=dest_object_name,
),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
return d | [
"def",
"copy_object",
"(",
"self",
",",
"source_bucket",
",",
"source_object_name",
",",
"dest_bucket",
"=",
"None",
",",
"dest_object_name",
"=",
"None",
",",
"metadata",
"=",
"{",
"}",
",",
"amz_headers",
"=",
"{",
"}",
")",
":",
"dest_bucket",
"=",
"des... | Copy an object stored in S3 from a source bucket to a destination
bucket.
@param source_bucket: The S3 bucket to copy the object from.
@param source_object_name: The name of the object to copy.
@param dest_bucket: Optionally, the S3 bucket to copy the object to.
Defaults to C{source_bucket}.
@param dest_object_name: Optionally, the name of the new object.
Defaults to C{source_object_name}.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request. | [
"Copy",
"an",
"object",
"stored",
"in",
"S3",
"from",
"a",
"source",
"bucket",
"to",
"a",
"destination",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L453-L482 | train | 36,858 |
twisted/txaws | txaws/s3/client.py | S3Client.get_object | def get_object(self, bucket, object_name):
"""
Get an object from a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d | python | def get_object(self, bucket, object_name):
"""
Get an object from a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d | [
"def",
"get_object",
"(",
"self",
",",
"bucket",
",",
"object_name",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
... | Get an object from a bucket. | [
"Get",
"an",
"object",
"from",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L484-L494 | train | 36,859 |
twisted/txaws | txaws/s3/client.py | S3Client.head_object | def head_object(self, bucket, object_name):
"""
Retrieve object metadata only.
"""
details = self._details(
method=b"HEAD",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda (response, body): _to_dict(response.responseHeaders))
return d | python | def head_object(self, bucket, object_name):
"""
Retrieve object metadata only.
"""
details = self._details(
method=b"HEAD",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda (response, body): _to_dict(response.responseHeaders))
return d | [
"def",
"head_object",
"(",
"self",
",",
"bucket",
",",
"object_name",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"HEAD\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name"... | Retrieve object metadata only. | [
"Retrieve",
"object",
"metadata",
"only",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L496-L506 | train | 36,860 |
twisted/txaws | txaws/s3/client.py | S3Client.delete_object | def delete_object(self, bucket, object_name):
"""
Delete an object from a bucket.
Once deleted, there is no method to restore or undelete an object.
"""
details = self._details(
method=b"DELETE",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
return d | python | def delete_object(self, bucket, object_name):
"""
Delete an object from a bucket.
Once deleted, there is no method to restore or undelete an object.
"""
details = self._details(
method=b"DELETE",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
return d | [
"def",
"delete_object",
"(",
"self",
",",
"bucket",
",",
"object_name",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"DELETE\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_n... | Delete an object from a bucket.
Once deleted, there is no method to restore or undelete an object. | [
"Delete",
"an",
"object",
"from",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L508-L519 | train | 36,861 |
twisted/txaws | txaws/s3/client.py | S3Client.put_object_acl | def put_object_acl(self, bucket, object_name, access_control_policy):
"""
Set access control policy on an object.
"""
data = access_control_policy.to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=bucket, object_name='%s?acl' % (object_name,),
),
body=data,
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_acl)
return d | python | def put_object_acl(self, bucket, object_name, access_control_policy):
"""
Set access control policy on an object.
"""
data = access_control_policy.to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=bucket, object_name='%s?acl' % (object_name,),
),
body=data,
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_acl)
return d | [
"def",
"put_object_acl",
"(",
"self",
",",
"bucket",
",",
"object_name",
",",
"access_control_policy",
")",
":",
"data",
"=",
"access_control_policy",
".",
"to_xml",
"(",
")",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"PUT\"",
",",
"url... | Set access control policy on an object. | [
"Set",
"access",
"control",
"policy",
"on",
"an",
"object",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L521-L536 | train | 36,862 |
twisted/txaws | txaws/s3/client.py | S3Client.put_request_payment | def put_request_payment(self, bucket, payer):
"""
Set request payment configuration on bucket to payer.
@param bucket: The name of the bucket.
@param payer: The name of the payer.
@return: A C{Deferred} that will fire with the result of the request.
"""
data = RequestPayment(payer).to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
body=data,
)
d = self._submit(self._query_factory(details))
return d | python | def put_request_payment(self, bucket, payer):
"""
Set request payment configuration on bucket to payer.
@param bucket: The name of the bucket.
@param payer: The name of the payer.
@return: A C{Deferred} that will fire with the result of the request.
"""
data = RequestPayment(payer).to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
body=data,
)
d = self._submit(self._query_factory(details))
return d | [
"def",
"put_request_payment",
"(",
"self",
",",
"bucket",
",",
"payer",
")",
":",
"data",
"=",
"RequestPayment",
"(",
"payer",
")",
".",
"to_xml",
"(",
")",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"PUT\"",
",",
"url_context",
"=",... | Set request payment configuration on bucket to payer.
@param bucket: The name of the bucket.
@param payer: The name of the payer.
@return: A C{Deferred} that will fire with the result of the request. | [
"Set",
"request",
"payment",
"configuration",
"on",
"bucket",
"to",
"payer",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L550-L565 | train | 36,863 |
twisted/txaws | txaws/s3/client.py | S3Client.get_request_payment | def get_request_payment(self, bucket):
"""
Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_request_payment)
return d | python | def get_request_payment(self, bucket):
"""
Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_request_payment)
return d | [
"def",
"get_request_payment",
"(",
"self",
",",
"bucket",
")",
":",
"details",
"=",
"self",
".",
"_details",
"(",
"method",
"=",
"b\"GET\"",
",",
"url_context",
"=",
"self",
".",
"_url_context",
"(",
"bucket",
"=",
"bucket",
",",
"object_name",
"=",
"\"?re... | Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer. | [
"Get",
"the",
"request",
"payment",
"configuration",
"on",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L567-L580 | train | 36,864 |
twisted/txaws | txaws/s3/client.py | S3Client.init_multipart_upload | def init_multipart_upload(self, bucket, object_name, content_type=None,
amz_headers={}, metadata={}):
"""
Initiate a multipart upload to a bucket.
@param bucket: The name of the bucket
@param object_name: The object name
@param content_type: The Content-Type for the object
@param metadata: C{dict} containing additional metadata
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: C{str} upload_id
"""
objectname_plus = '%s?uploads' % object_name
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
d.addCallback(
lambda (response, body): MultipartInitiationResponse.from_xml(body)
)
return d | python | def init_multipart_upload(self, bucket, object_name, content_type=None,
amz_headers={}, metadata={}):
"""
Initiate a multipart upload to a bucket.
@param bucket: The name of the bucket
@param object_name: The object name
@param content_type: The Content-Type for the object
@param metadata: C{dict} containing additional metadata
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: C{str} upload_id
"""
objectname_plus = '%s?uploads' % object_name
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
d.addCallback(
lambda (response, body): MultipartInitiationResponse.from_xml(body)
)
return d | [
"def",
"init_multipart_upload",
"(",
"self",
",",
"bucket",
",",
"object_name",
",",
"content_type",
"=",
"None",
",",
"amz_headers",
"=",
"{",
"}",
",",
"metadata",
"=",
"{",
"}",
")",
":",
"objectname_plus",
"=",
"'%s?uploads'",
"%",
"object_name",
"detail... | Initiate a multipart upload to a bucket.
@param bucket: The name of the bucket
@param object_name: The object name
@param content_type: The Content-Type for the object
@param metadata: C{dict} containing additional metadata
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: C{str} upload_id | [
"Initiate",
"a",
"multipart",
"upload",
"to",
"a",
"bucket",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L589-L613 | train | 36,865 |
def upload_part(self, bucket, object_name, upload_id, part_number,
                data=None, content_type=None, metadata=None,
                body_producer=None):
    """
    Upload a part of data corresponding to a multipart upload.

    @param bucket: The bucket name.
    @param object_name: The object name.
    @param upload_id: The multipart upload id.
    @param part_number: The part number.
    @param data: Data (optional, requires body_producer if not specified).
    @param content_type: The Content-Type.
    @param metadata: Additional metadata (optional).
    @param body_producer: an C{IBodyProducer} (optional, requires data if
        not specified).
    @return: the C{Deferred} from the underlying query.submit() call,
        firing with a C{dict} of the response headers.
    """
    if metadata is None:
        # None default avoids the shared-mutable-default-argument pitfall.
        metadata = {}
    parms = 'partNumber=%s&uploadId=%s' % (str(part_number), upload_id)
    objectname_plus = '%s?%s' % (object_name, parms)
    # NOTE(review): body_producer is accepted but never forwarded to
    # _details(); confirm whether it should be passed through for
    # streaming uploads.
    details = self._details(
        method=b"PUT",
        url_context=self._url_context(bucket=bucket,
                                      object_name=objectname_plus),
        headers=self._headers(content_type),
        metadata=metadata,
        body=data,
    )
    d = self._submit(self._query_factory(details))
    # Tuple-parameter lambdas are Python-2-only syntax (PEP 3113);
    # index into the (response, data) pair explicitly instead.
    d.addCallback(lambda result: _to_dict(result[0].responseHeaders))
    return d
data=None, content_type=None, metadata={},
body_producer=None):
"""
Upload a part of data corresponding to a multipart upload.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param part_number: The part number
@param data: Data (optional, requires body_producer if not specified)
@param content_type: The Content-Type
@param metadata: Additional metadata
@param body_producer: an C{IBodyProducer} (optional, requires data if
not specified)
@return: the C{Deferred} from underlying query.submit() call
"""
parms = 'partNumber=%s&uploadId=%s' % (str(part_number), upload_id)
objectname_plus = '%s?%s' % (object_name, parms)
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda (response, data): _to_dict(response.responseHeaders))
return d | [
"def",
"upload_part",
"(",
"self",
",",
"bucket",
",",
"object_name",
",",
"upload_id",
",",
"part_number",
",",
"data",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"metadata",
"=",
"{",
"}",
",",
"body_producer",
"=",
"None",
")",
":",
"parms",
... | Upload a part of data corresponding to a multipart upload.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param part_number: The part number
@param data: Data (optional, requires body_producer if not specified)
@param content_type: The Content-Type
@param metadata: Additional metadata
@param body_producer: an C{IBodyProducer} (optional, requires data if
not specified)
@return: the C{Deferred} from underlying query.submit() call | [
"Upload",
"a",
"part",
"of",
"data",
"corresponding",
"to",
"a",
"multipart",
"upload",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L615-L643 | train | 36,866 |
def set_content_type(self):
    """
    Infer and store the Content-Type from the file extension of the
    object name, unless a content type was already supplied.
    """
    if not self.object_name or self.content_type:
        # Nothing to guess from, or the caller already chose a type.
        return
    # XXX the encoding returned by guess_type is currently unused; we
    # may need it in the future.
    guessed_type, _encoding = mimetypes.guess_type(
        self.object_name, strict=False)
    self.content_type = guessed_type
"""
Set the content type based on the file extension used in the object
name.
"""
if self.object_name and not self.content_type:
# XXX nothing is currently done with the encoding... we may
# need to in the future
self.content_type, encoding = mimetypes.guess_type(
self.object_name, strict=False) | [
"def",
"set_content_type",
"(",
"self",
")",
":",
"if",
"self",
".",
"object_name",
"and",
"not",
"self",
".",
"content_type",
":",
"# XXX nothing is currently done with the encoding... we may",
"# need to in the future",
"self",
".",
"content_type",
",",
"encoding",
"=... | Set the content type based on the file extension used in the object
name. | [
"Set",
"the",
"content",
"type",
"based",
"on",
"the",
"file",
"extension",
"used",
"in",
"the",
"object",
"name",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L734-L743 | train | 36,867 |
def get_headers(self, instant):
    """
    Build the list of headers needed in order to perform S3 operations.

    @param instant: The point in time used for the C{x-amz-date} header
        and for signing the request.
    @return: C{dict} mapping header names to values.
    """
    headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
    if self.body_producer is None:
        data = self.data
        if data is None:
            data = b""
        headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
    else:
        # With a streaming body producer the payload hash cannot be
        # precomputed, so the payload is declared unsigned.
        data = None
        headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
    # dict.items() works on both Python 2 and 3; iteritems() is
    # Python-2-only and raised AttributeError under Python 3.
    for key, value in self.metadata.items():
        headers["x-amz-meta-" + key] = value
    for key, value in self.amz_headers.items():
        headers["x-amz-" + key] = value
    # Before we check if the content type is set, let's see if we can set
    # it by guessing the mimetype from the object name.
    self.set_content_type()
    if self.content_type is not None:
        headers["Content-Type"] = self.content_type
    if self.creds is not None:
        headers["Authorization"] = self.sign(
            headers,
            data,
            s3_url_context(self.endpoint, self.bucket, self.object_name),
            instant,
            method=self.action)
    return headers
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
if self.body_producer is None:
data = self.data
if data is None:
data = b""
headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
else:
data = None
headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
for key, value in self.metadata.iteritems():
headers["x-amz-meta-" + key] = value
for key, value in self.amz_headers.iteritems():
headers["x-amz-" + key] = value
# Before we check if the content type is set, let's see if we can set
# it by guessing the the mimetype.
self.set_content_type()
if self.content_type is not None:
headers["Content-Type"] = self.content_type
if self.creds is not None:
headers["Authorization"] = self.sign(
headers,
data,
s3_url_context(self.endpoint, self.bucket, self.object_name),
instant,
method=self.action)
return headers | [
"def",
"get_headers",
"(",
"self",
",",
"instant",
")",
":",
"headers",
"=",
"{",
"'x-amz-date'",
":",
"_auth_v4",
".",
"makeAMZDate",
"(",
"instant",
")",
"}",
"if",
"self",
".",
"body_producer",
"is",
"None",
":",
"data",
"=",
"self",
".",
"data",
"i... | Build the list of headers needed in order to perform S3 operations. | [
"Build",
"the",
"list",
"of",
"headers",
"needed",
"in",
"order",
"to",
"perform",
"S3",
"operations",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/s3/client.py#L745-L775 | train | 36,868 |
def attributes(self):
    """Yield the attributes applicable to this element.

    The previous version carried its docstring as a bare string in the
    middle of the function body, where it was a no-op statement rather
    than a docstring; it now sits at the top where tooling can see it.
    """
    # 'id' and 'tei-tag' live directly on the node rather than in an
    # <attributes> child, so surface them as placeholder attributes.
    if 'id' in self.node.attrib:
        yield PlaceholderAttribute('id', self.node.attrib['id'])
    if 'tei-tag' in self.node.attrib:
        yield PlaceholderAttribute('tei-tag', self.node.attrib['tei-tag'])
    for attributes in self.node.iterchildren('attributes'):
        for attribute in self.__iter_attributes__(attributes):
            yield attribute
if 'id' in self.node.attrib:
yield PlaceholderAttribute('id', self.node.attrib['id'])
if 'tei-tag' in self.node.attrib:
yield PlaceholderAttribute('tei-tag', self.node.attrib['tei-tag'])
"""Contain attributes applicable to this element"""
for attributes in self.node.iterchildren('attributes'):
for attribute in self.__iter_attributes__(attributes):
yield attribute | [
"def",
"attributes",
"(",
"self",
")",
":",
"if",
"'id'",
"in",
"self",
".",
"node",
".",
"attrib",
":",
"yield",
"PlaceholderAttribute",
"(",
"'id'",
",",
"self",
".",
"node",
".",
"attrib",
"[",
"'id'",
"]",
")",
"if",
"'tei-tag'",
"in",
"self",
".... | Contain attributes applicable to this element | [
"Contain",
"attributes",
"applicable",
"to",
"this",
"element"
] | 7b19c34a9d7cc941a36ecdcf6f361e26c6488697 | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/element.py#L14-L24 | train | 36,869 |
def divisions(self):
    """
    Recursively yield the text divisions directly part of this element.

    Runs of consecutive parts (or untagged text) are collected, in
    order, into a single PlaceholderDivision.
    """
    from .placeholder_division import PlaceholderDivision
    pending = None
    for child in self.__parts_and_divisions:
        if child.tag == 'part':
            # Accumulate consecutive parts into one placeholder division.
            if not pending:
                pending = PlaceholderDivision()
            pending.parts.append(child)
            continue
        # A real division flushes any accumulated placeholder first.
        if pending:
            yield pending
            pending = None
        yield child
    if pending:
        yield pending
"""
Recursively get all the text divisions directly part of this element. If an element contains parts or text without tag. Those will be returned in order and wrapped with a TextDivision.
"""
from .placeholder_division import PlaceholderDivision
placeholder = None
for item in self.__parts_and_divisions:
if item.tag == 'part':
if not placeholder:
placeholder = PlaceholderDivision()
placeholder.parts.append(item)
else:
if placeholder:
yield placeholder
placeholder = None
yield item
if placeholder:
yield placeholder | [
"def",
"divisions",
"(",
"self",
")",
":",
"from",
".",
"placeholder_division",
"import",
"PlaceholderDivision",
"placeholder",
"=",
"None",
"for",
"item",
"in",
"self",
".",
"__parts_and_divisions",
":",
"if",
"item",
".",
"tag",
"==",
"'part'",
":",
"if",
... | Recursively get all the text divisions directly part of this element. If an element contains parts or text without tag. Those will be returned in order and wrapped with a TextDivision. | [
"Recursively",
"get",
"all",
"the",
"text",
"divisions",
"directly",
"part",
"of",
"this",
"element",
".",
"If",
"an",
"element",
"contains",
"parts",
"or",
"text",
"without",
"tag",
".",
"Those",
"will",
"be",
"returned",
"in",
"order",
"and",
"wrapped",
... | 7b19c34a9d7cc941a36ecdcf6f361e26c6488697 | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/element.py#L32-L51 | train | 36,870 |
def parts(self):
    """
    Yield the parts directly below this element.
    """
    for child in self.__parts_and_divisions:
        if child.tag == 'part':
            yield child
            continue
        # Divisions shouldn't be beneath a part, but if one shows up,
        # fall back to yielding the parts it contains.
        for nested in child.parts:
            yield nested
"""
Get the parts directly below this element.
"""
for item in self.__parts_and_divisions:
if item.tag == 'part':
yield item
else:
# Divisions shouldn't be beneath a part, but here's a fallback
# for if this does happen
for part in item.parts:
yield part | [
"def",
"parts",
"(",
"self",
")",
":",
"for",
"item",
"in",
"self",
".",
"__parts_and_divisions",
":",
"if",
"item",
".",
"tag",
"==",
"'part'",
":",
"yield",
"item",
"else",
":",
"# Divisions shouldn't be beneath a part, but here's a fallback",
"# for if this does ... | Get the parts directly below this element. | [
"Get",
"the",
"parts",
"directly",
"below",
"this",
"element",
"."
] | 7b19c34a9d7cc941a36ecdcf6f361e26c6488697 | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/element.py#L69-L81 | train | 36,871 |
def __parts_and_divisions(self):
    """
    Yield the parts and divisions directly contained by this element's
    node, wrapping significant loose text (and element tails) in
    PlaceholderPart objects.
    """
    from .division import Division
    from .part import Part
    from .placeholder_part import PlaceholderPart

    def significant(raw):
        # Strip newlines; report None when nothing meaningful remains.
        if not raw:
            return None
        cleaned = raw.replace('\n', '')
        return cleaned if cleaned.strip() else None

    leading = significant(self.node.text)
    if leading is not None:
        yield PlaceholderPart(leading)
    for child in self.node:
        if child.tag == 'part':
            yield Part(child)
        elif child.tag == 'div':
            yield Division(child)
        trailing = significant(child.tail)
        if trailing is not None:
            yield PlaceholderPart(trailing)
"""
The parts and divisions directly part of this element.
"""
from .division import Division
from .part import Part
from .placeholder_part import PlaceholderPart
text = self.node.text
if text:
stripped_text = text.replace('\n', '')
if stripped_text.strip():
yield PlaceholderPart(stripped_text)
for item in self.node:
if item.tag == 'part':
yield Part(item)
elif item.tag == 'div':
yield Division(item)
if item.tail:
stripped_tail = item.tail.replace('\n', '')
if stripped_tail.strip():
yield PlaceholderPart(stripped_tail) | [
"def",
"__parts_and_divisions",
"(",
"self",
")",
":",
"from",
".",
"division",
"import",
"Division",
"from",
".",
"part",
"import",
"Part",
"from",
".",
"placeholder_part",
"import",
"PlaceholderPart",
"text",
"=",
"self",
".",
"node",
".",
"text",
"if",
"t... | The parts and divisions directly part of this element. | [
"The",
"parts",
"and",
"divisions",
"directly",
"part",
"of",
"this",
"element",
"."
] | 7b19c34a9d7cc941a36ecdcf6f361e26c6488697 | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/element.py#L88-L112 | train | 36,872 |
T-002/pycast | pycast/errors/baseerrormeasure.py | BaseErrorMeasure._get_error_values | def _get_error_values(self, startingPercentage, endPercentage, startDate, endDate):
"""Gets the defined subset of self._errorValues.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a list with the defined error values.
:rtype: list
:raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation.
"""
if startDate is not None:
possibleDates = filter(lambda date: date >= startDate, self._errorDates)
if 0 == len(possibleDates):
raise ValueError("%s does not represent a valid startDate." % startDate)
startIdx = self._errorDates.index(min(possibleDates))
else:
startIdx = int((startingPercentage * len(self._errorValues)) / 100.0)
if endDate is not None:
possibleDates = filter(lambda date: date <= endDate, self._errorDates)
if 0 == len(possibleDates):
raise ValueError("%s does not represent a valid endDate." % endDate)
endIdx = self._errorDates.index(max(possibleDates)) + 1
else:
endIdx = int((endPercentage * len(self._errorValues)) / 100.0)
return self._errorValues[startIdx:endIdx] | python | def _get_error_values(self, startingPercentage, endPercentage, startDate, endDate):
"""Gets the defined subset of self._errorValues.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a list with the defined error values.
:rtype: list
:raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation.
"""
if startDate is not None:
possibleDates = filter(lambda date: date >= startDate, self._errorDates)
if 0 == len(possibleDates):
raise ValueError("%s does not represent a valid startDate." % startDate)
startIdx = self._errorDates.index(min(possibleDates))
else:
startIdx = int((startingPercentage * len(self._errorValues)) / 100.0)
if endDate is not None:
possibleDates = filter(lambda date: date <= endDate, self._errorDates)
if 0 == len(possibleDates):
raise ValueError("%s does not represent a valid endDate." % endDate)
endIdx = self._errorDates.index(max(possibleDates)) + 1
else:
endIdx = int((endPercentage * len(self._errorValues)) / 100.0)
return self._errorValues[startIdx:endIdx] | [
"def",
"_get_error_values",
"(",
"self",
",",
"startingPercentage",
",",
"endPercentage",
",",
"startDate",
",",
"endDate",
")",
":",
"if",
"startDate",
"is",
"not",
"None",
":",
"possibleDates",
"=",
"filter",
"(",
"lambda",
"date",
":",
"date",
">=",
"star... | Gets the defined subset of self._errorValues.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a list with the defined error values.
:rtype: list
:raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation. | [
"Gets",
"the",
"defined",
"subset",
"of",
"self",
".",
"_errorValues",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/errors/baseerrormeasure.py#L107-L144 | train | 36,873 |
def confidence_interval(self, confidenceLevel):
    """Calculate for which value confidenceLevel% of the errors are closer to 0.

    :param float confidenceLevel: fraction of the errors that should be
        smaller than the returned value for overestimations and larger
        than the returned value for underestimations.
        confidenceLevel has to be in [0.0, 1.0].
    :return: a tuple containing the underestimation and overestimation
        for the given confidenceLevel.
    :rtype: tuple
    :raise: Raises a ValueError if confidenceLevel is outside [0, 1].

    :warning: Index is still not calculated correctly.
    """
    if not (0 <= confidenceLevel <= 1):
        raise ValueError("Parameter percentage has to be in [0,1]")

    underestimations = []
    overestimations = []
    for error in self._errorValues:
        if error is None:
            # Skip entries for which no local error could be computed.
            continue
        # Zero errors are deliberately counted in both lists.
        if error >= 0:
            overestimations.append(error)
        if error <= 0:
            underestimations.append(error)

    # Sort towards increasing distance from zero, then cut off at the
    # requested confidence level.
    overestimations.sort()
    underestimations.sort(reverse=True)
    overIdx = int(len(overestimations) * confidenceLevel) - 1
    underIdx = int(len(underestimations) * confidenceLevel) - 1

    # A leftover Python-2 debug `print` statement lived in the overIdx<0
    # branch; it made the module unparsable under Python 3 and has been
    # removed.
    overestimation = overestimations[overIdx] if overIdx >= 0 else 0.0
    underestimation = underestimations[underIdx] if underIdx >= 0 else 0.0
    return underestimation, overestimation
"""Calculates for which value confidenceLevel% of the errors are closer to 0.
:param float confidenceLevel: percentage of the errors that should be
smaller than the returned value for overestimations and larger than
the returned value for underestimations.
confidenceLevel has to be in [0.0, 1.0]
:return: return a tuple containing the underestimation and overestimation for
the given confidenceLevel
:rtype: tuple
:warning: Index is still not calculated correctly
"""
if not (confidenceLevel >= 0 and confidenceLevel <= 1):
raise ValueError("Parameter percentage has to be in [0,1]")
underestimations = []
overestimations = []
for error in self._errorValues:
if error is None:
# None was in the lists causing some confidenceLevels not be calculated, not sure if that was intended, I suggested ignoring None values
continue
#Want 0 errors in both lists!
if error >= 0:
overestimations.append(error)
if error <= 0:
underestimations.append(error)
#sort and cut off at confidence level.
overestimations.sort()
underestimations.sort(reverse=True)
overIdx = int(len(overestimations) * confidenceLevel) - 1
underIdx = int(len(underestimations) * confidenceLevel) - 1
overestimation = 0.0
underestimation = 0.0
if overIdx >= 0:
overestimation = overestimations[overIdx]
else:
print len(overestimations), confidenceLevel
if underIdx >= 0:
underestimation = underestimations[underIdx]
return underestimation, overestimation | [
"def",
"confidence_interval",
"(",
"self",
",",
"confidenceLevel",
")",
":",
"if",
"not",
"(",
"confidenceLevel",
">=",
"0",
"and",
"confidenceLevel",
"<=",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Parameter percentage has to be in [0,1]\"",
")",
"underestimati... | Calculates for which value confidenceLevel% of the errors are closer to 0.
:param float confidenceLevel: percentage of the errors that should be
smaller than the returned value for overestimations and larger than
the returned value for underestimations.
confidenceLevel has to be in [0.0, 1.0]
:return: return a tuple containing the underestimation and overestimation for
the given confidenceLevel
:rtype: tuple
:warning: Index is still not calculated correctly | [
"Calculates",
"for",
"which",
"value",
"confidenceLevel%",
"of",
"the",
"errors",
"are",
"closer",
"to",
"0",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/errors/baseerrormeasure.py#L220-L268 | train | 36,874 |
def load_segmented_data(filename):
    """
    Helper function to load segmented gait time series data.

    :param filename: Full path of the comma separated value (csv) file
        that contains the data.
    :type filename: str
    :return: The segmented gait time series data, with x, y, z,
        mag_acc_sum and segmented columns and a datetime index.
    :rtype: pandas.DataFrame
    """
    frame = pd.read_csv(filename, index_col=0)
    frame.index = frame.index.astype(np.datetime64)
    return frame
"""
Helper function to load segmented gait time series data.
:param filename: The full path of the file that contais our data. This should be a comma separated value (csv file).
:type filename: str
:return: The gait time series segmented data, with a x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame
"""
data = pd.read_csv(filename, index_col=0)
data.index = data.index.astype(np.datetime64)
return data | [
"def",
"load_segmented_data",
"(",
"filename",
")",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
",",
"index_col",
"=",
"0",
")",
"data",
".",
"index",
"=",
"data",
".",
"index",
".",
"astype",
"(",
"np",
".",
"datetime64",
")",
"return",
... | Helper function to load segmented gait time series data.
:param filename: The full path of the file that contais our data. This should be a comma separated value (csv file).
:type filename: str
:return: The gait time series segmented data, with a x, y, z, mag_acc_sum and segmented columns.
:rtype: pandas.DataFrame | [
"Helper",
"function",
"to",
"load",
"segmented",
"gait",
"time",
"series",
"data",
"."
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L81-L94 | train | 36,875 |
def load_finger_tapping_cloudupdrs_data(filename, convert_times=1000.0):
    """
    Load data in the cloudupdrs format for the finger tapping processor.

    The csv rows look like::

        timestamp, . , action_type, x, y, . , . , x_target, y_target

    where x, y are the components of the tapping position and x_target,
    y_target their target.

    :param filename: The path to load data from.
    :type filename: string
    :param convert_times: Divisor applied to the raw timestamps; the
        default converts from milliseconds to seconds.
    :type convert_times: float
    """
    raw = np.genfromtxt(filename, delimiter=',', invalid_raise=False,
                        skip_footer=1)
    elapsed = raw[:, 0] - raw[0, 0]
    column_order = ['td', 'action_type', 'x', 'y', 'x_target', 'y_target']
    values = {
        'td': elapsed / convert_times,
        'action_type': raw[:, 2],
        'x': raw[:, 3],
        'y': raw[:, 4],
        'x_target': raw[:, 7],
        'y_target': raw[:, 8],
    }
    return pd.DataFrame(values, index=pd.to_datetime(elapsed),
                        columns=column_order)
"""
This method loads data in the cloudupdrs format for the finger tapping processor
Usually the data will be saved in a csv file and it should look like this:
.. code-block:: json
timestamp_0, . , action_type_0, x_0, y_0, . , . , x_target_0, y_target_0
timestamp_1, . , action_type_1, x_1, y_1, . , . , x_target_1, y_target_1
timestamp_2, . , action_type_2, x_2, y_2, . , . , x_target_2, y_target_2
.
.
.
timestamp_n, . , action_type_n, x_n, y_n, . , . , x_target_n, y_target_n
where data_frame.x, data_frame.y: components of tapping position. data_frame.x_target,
data_frame.y_target their target.
:param filename: The path to load data from
:type filename: string
:param convert_times: Convert times. The default is from from milliseconds to seconds.
:type convert_times: float
"""
data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False, skip_footer=1)
date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0]))
time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times
data = {'td': time_difference, 'action_type': data_m[:, 2],'x': data_m[:, 3], 'y': data_m[:, 4],
'x_target': data_m[:, 7], 'y_target': data_m[:, 8]}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'action_type','x', 'y', 'x_target', 'y_target'])
return data_frame | [
"def",
"load_finger_tapping_cloudupdrs_data",
"(",
"filename",
",",
"convert_times",
"=",
"1000.0",
")",
":",
"data_m",
"=",
"np",
".",
"genfromtxt",
"(",
"filename",
",",
"delimiter",
"=",
"','",
",",
"invalid_raise",
"=",
"False",
",",
"skip_footer",
"=",
"1... | This method loads data in the cloudupdrs format for the finger tapping processor
Usually the data will be saved in a csv file and it should look like this:
.. code-block:: json
timestamp_0, . , action_type_0, x_0, y_0, . , . , x_target_0, y_target_0
timestamp_1, . , action_type_1, x_1, y_1, . , . , x_target_1, y_target_1
timestamp_2, . , action_type_2, x_2, y_2, . , . , x_target_2, y_target_2
.
.
.
timestamp_n, . , action_type_n, x_n, y_n, . , . , x_target_n, y_target_n
where data_frame.x, data_frame.y: components of tapping position. data_frame.x_target,
data_frame.y_target their target.
:param filename: The path to load data from
:type filename: string
:param convert_times: Convert times. The default is from from milliseconds to seconds.
:type convert_times: float | [
"This",
"method",
"loads",
"data",
"in",
"the",
"cloudupdrs",
"format",
"for",
"the",
"finger",
"tapping",
"processor"
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L211-L242 | train | 36,876 |
def numerical_integration(signal, sampling_frequency):
    """
    Numerically integrate a signal (trapezoidal rule) given its sampling
    frequency.

    :param signal: A 1-dimensional array or list (the signal).
    :type signal: array
    :param sampling_frequency: The sampling frequency for the signal (Hz),
        i.e. the sample spacing is 1 / sampling_frequency.
    :type sampling_frequency: float
    :return: The integrated signal.
    :rtype: numpy.ndarray
    """
    # Trapezoidal rule: (sum(signal[1:]) + sum(signal[:-1])) * dt / 2
    # with dt = 1 / sampling_frequency.  The previous version divided only
    # the first partial sum by the sampling frequency before halving,
    # i.e. sum(signal[1:]) / (2*fs**2) + sum(signal[:-1]) / (2*fs), which
    # is not the trapezoidal rule (an operator-precedence bug).
    integrate = (sum(signal[1:]) + sum(signal[:-1])) / (sampling_frequency * 2.0)
    return np.array(integrate)
"""
Numerically integrate a signal with it's sampling frequency.
:param signal: A 1-dimensional array or list (the signal).
:type signal: array
:param sampling_frequency: The sampling frequency for the signal.
:type sampling_frequency: float
:return: The integrated signal.
:rtype: numpy.ndarray
"""
integrate = sum(signal[1:]) / sampling_frequency + sum(signal[:-1])
integrate /= sampling_frequency * 2
return np.array(integrate) | [
"def",
"numerical_integration",
"(",
"signal",
",",
"sampling_frequency",
")",
":",
"integrate",
"=",
"sum",
"(",
"signal",
"[",
"1",
":",
"]",
")",
"/",
"sampling_frequency",
"+",
"sum",
"(",
"signal",
"[",
":",
"-",
"1",
"]",
")",
"integrate",
"/=",
... | Numerically integrate a signal with it's sampling frequency.
:param signal: A 1-dimensional array or list (the signal).
:type signal: array
:param sampling_frequency: The sampling frequency for the signal.
:type sampling_frequency: float
:return: The integrated signal.
:rtype: numpy.ndarray | [
"Numerically",
"integrate",
"a",
"signal",
"with",
"it",
"s",
"sampling",
"frequency",
"."
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L327-L342 | train | 36,877 |
def compute_interpeak(data, sample_rate):
    """
    Compute number of samples between signal peaks using the real part
    of the FFT.

    :param data: 1-dimensional time series data.
    :type data: array
    :param sample_rate: Sample rate of accelerometer reading (Hz).
    :type sample_rate: float
    :return interpeak: Number of samples between peaks.
    :rtype interpeak: int

    :Examples:

    >>> import numpy as np
    >>> data = np.random.random(10000)
    >>> sample_rate = 100
    >>> interpeak = compute_interpeak(data, sample_rate)
    """
    # Real part of FFT:
    freqs = fftfreq(data.size, d=1.0 / sample_rate)
    f_signal = rfft(data)

    # Second-largest coefficient: the largest is typically the DC
    # component, so [-2] selects the dominant non-zero frequency.
    imax_freq = np.argsort(f_signal)[-2]
    freq = np.abs(freqs[imax_freq])

    # Inter-peak samples.  np.int was a deprecated alias for the builtin
    # int and was removed in NumPy 1.24; use int() directly.
    interpeak = int(np.round(sample_rate / freq))
    return interpeak
"""
Compute number of samples between signal peaks using the real part of FFT.
:param data: 1-dimensional time series data.
:type data: array
:param sample_rate: Sample rate of accelerometer reading (Hz)
:type sample_rate: float
:return interpeak: Number of samples between peaks
:rtype interpeak: int
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import compute_interpeak
>>> data = np.random.random(10000)
>>> sample_rate = 100
>>> interpeak = compute_interpeak(data, sample_rate)
"""
# Real part of FFT:
freqs = fftfreq(data.size, d=1.0/sample_rate)
f_signal = rfft(data)
# Maximum non-zero frequency:
imax_freq = np.argsort(f_signal)[-2]
freq = np.abs(freqs[imax_freq])
# Inter-peak samples:
interpeak = np.int(np.round(sample_rate / freq))
return interpeak | [
"def",
"compute_interpeak",
"(",
"data",
",",
"sample_rate",
")",
":",
"# Real part of FFT:",
"freqs",
"=",
"fftfreq",
"(",
"data",
".",
"size",
",",
"d",
"=",
"1.0",
"/",
"sample_rate",
")",
"f_signal",
"=",
"rfft",
"(",
"data",
")",
"# Maximum non-zero fre... | Compute number of samples between signal peaks using the real part of FFT.
:param data: 1-dimensional time series data.
:type data: array
:param sample_rate: Sample rate of accelerometer reading (Hz)
:type sample_rate: float
:return interpeak: Number of samples between peaks
:rtype interpeak: int
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import compute_interpeak
>>> data = np.random.random(10000)
>>> sample_rate = 100
>>> interpeak = compute_interpeak(data, sample_rate) | [
"Compute",
"number",
"of",
"samples",
"between",
"signal",
"peaks",
"using",
"the",
"real",
"part",
"of",
"FFT",
"."
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L430-L461 | train | 36,878 |
pdkit/pdkit | pdkit/utils.py | non_zero_row | def non_zero_row(arr):
"""
0. Empty row returns False.
>>> arr = array([])
>>> non_zero_row(arr)
False
1. Row with a zero returns False.
>>> arr = array([1, 4, 3, 0, 5, -1, -2])
>>> non_zero_row(arr)
False
2. Row with no zeros returns True.
>>> arr = array([-1, -0.1, 0.001, 2])
>>> non_zero_row(arr)
True
:param arr: array
:type arr: numpy array
:return empty: If row is completely free of zeros
:rtype empty: bool
"""
if len(arr) == 0:
return False
for item in arr:
if item == 0:
return False
return True | python | def non_zero_row(arr):
"""
0. Empty row returns False.
>>> arr = array([])
>>> non_zero_row(arr)
False
1. Row with a zero returns False.
>>> arr = array([1, 4, 3, 0, 5, -1, -2])
>>> non_zero_row(arr)
False
2. Row with no zeros returns True.
>>> arr = array([-1, -0.1, 0.001, 2])
>>> non_zero_row(arr)
True
:param arr: array
:type arr: numpy array
:return empty: If row is completely free of zeros
:rtype empty: bool
"""
if len(arr) == 0:
return False
for item in arr:
if item == 0:
return False
return True | [
"def",
"non_zero_row",
"(",
"arr",
")",
":",
"if",
"len",
"(",
"arr",
")",
"==",
"0",
":",
"return",
"False",
"for",
"item",
"in",
"arr",
":",
"if",
"item",
"==",
"0",
":",
"return",
"False",
"return",
"True"
] | 0. Empty row returns False.
>>> arr = array([])
>>> non_zero_row(arr)
False
1. Row with a zero returns False.
>>> arr = array([1, 4, 3, 0, 5, -1, -2])
>>> non_zero_row(arr)
False
2. Row with no zeros returns True.
>>> arr = array([-1, -0.1, 0.001, 2])
>>> non_zero_row(arr)
True
:param arr: array
:type arr: numpy array
:return empty: If row is completely free of zeros
:rtype empty: bool | [
"0",
".",
"Empty",
"row",
"returns",
"False",
"."
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L944-L979 | train | 36,879 |
Duke-GCB/DukeDSClient | ddsc/versioncheck.py | get_pypi_version | def get_pypi_version():
"""
Returns the version info from pypi for this app.
"""
try:
response = requests.get(PYPI_URL, timeout=HALF_SECOND_TIMEOUT)
response.raise_for_status()
data = response.json()
version_str = data["info"]["version"]
return _parse_version_str(version_str)
except requests.exceptions.ConnectionError:
raise VersionException(UNABLE_TO_ACCESS_PYPI + " Failed to connect.")
except requests.exceptions.Timeout:
raise VersionException(UNABLE_TO_ACCESS_PYPI + " Timeout") | python | def get_pypi_version():
"""
Returns the version info from pypi for this app.
"""
try:
response = requests.get(PYPI_URL, timeout=HALF_SECOND_TIMEOUT)
response.raise_for_status()
data = response.json()
version_str = data["info"]["version"]
return _parse_version_str(version_str)
except requests.exceptions.ConnectionError:
raise VersionException(UNABLE_TO_ACCESS_PYPI + " Failed to connect.")
except requests.exceptions.Timeout:
raise VersionException(UNABLE_TO_ACCESS_PYPI + " Timeout") | [
"def",
"get_pypi_version",
"(",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"PYPI_URL",
",",
"timeout",
"=",
"HALF_SECOND_TIMEOUT",
")",
"response",
".",
"raise_for_status",
"(",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
... | Returns the version info from pypi for this app. | [
"Returns",
"the",
"version",
"info",
"from",
"pypi",
"for",
"this",
"app",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/versioncheck.py#L18-L31 | train | 36,880 |
Duke-GCB/DukeDSClient | ddsc/core/parallel.py | TaskExecutor.start_tasks | def start_tasks(self):
"""
Start however many tasks we can based on our limits and what we have left to finish.
"""
while self.tasks_at_once > len(self.pending_results) and self._has_more_tasks():
task, parent_result = self.tasks.popleft()
self.execute_task(task, parent_result) | python | def start_tasks(self):
"""
Start however many tasks we can based on our limits and what we have left to finish.
"""
while self.tasks_at_once > len(self.pending_results) and self._has_more_tasks():
task, parent_result = self.tasks.popleft()
self.execute_task(task, parent_result) | [
"def",
"start_tasks",
"(",
"self",
")",
":",
"while",
"self",
".",
"tasks_at_once",
">",
"len",
"(",
"self",
".",
"pending_results",
")",
"and",
"self",
".",
"_has_more_tasks",
"(",
")",
":",
"task",
",",
"parent_result",
"=",
"self",
".",
"tasks",
".",
... | Start however many tasks we can based on our limits and what we have left to finish. | [
"Start",
"however",
"many",
"tasks",
"we",
"can",
"based",
"on",
"our",
"limits",
"and",
"what",
"we",
"have",
"left",
"to",
"finish",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/parallel.py#L206-L212 | train | 36,881 |
Duke-GCB/DukeDSClient | ddsc/core/parallel.py | TaskExecutor.get_finished_results | def get_finished_results(self):
"""
Go through pending results and retrieve the results if they are done.
Then start child tasks for the task that finished.
"""
task_and_results = []
for pending_result in self.pending_results:
if pending_result.ready():
ret = pending_result.get()
task_id, result = ret
task = self.task_id_to_task[task_id]
# process any pending messages for this task (will also process other tasks messages)
self.process_all_messages_in_queue()
task.after_run(result)
task_and_results.append((task, result))
self.pending_results.remove(pending_result)
return task_and_results | python | def get_finished_results(self):
"""
Go through pending results and retrieve the results if they are done.
Then start child tasks for the task that finished.
"""
task_and_results = []
for pending_result in self.pending_results:
if pending_result.ready():
ret = pending_result.get()
task_id, result = ret
task = self.task_id_to_task[task_id]
# process any pending messages for this task (will also process other tasks messages)
self.process_all_messages_in_queue()
task.after_run(result)
task_and_results.append((task, result))
self.pending_results.remove(pending_result)
return task_and_results | [
"def",
"get_finished_results",
"(",
"self",
")",
":",
"task_and_results",
"=",
"[",
"]",
"for",
"pending_result",
"in",
"self",
".",
"pending_results",
":",
"if",
"pending_result",
".",
"ready",
"(",
")",
":",
"ret",
"=",
"pending_result",
".",
"get",
"(",
... | Go through pending results and retrieve the results if they are done.
Then start child tasks for the task that finished. | [
"Go",
"through",
"pending",
"results",
"and",
"retrieve",
"the",
"results",
"if",
"they",
"are",
"done",
".",
"Then",
"start",
"child",
"tasks",
"for",
"the",
"task",
"that",
"finished",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/parallel.py#L247-L263 | train | 36,882 |
twisted/txaws | txaws/route53/client.py | get_route53_client | def get_route53_client(agent, region, cooperator=None):
"""
Get a non-registration Route53 client.
"""
if cooperator is None:
cooperator = task
return region.get_client(
_Route53Client,
agent=agent,
creds=region.creds,
region=REGION_US_EAST_1,
endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT),
cooperator=cooperator,
) | python | def get_route53_client(agent, region, cooperator=None):
"""
Get a non-registration Route53 client.
"""
if cooperator is None:
cooperator = task
return region.get_client(
_Route53Client,
agent=agent,
creds=region.creds,
region=REGION_US_EAST_1,
endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT),
cooperator=cooperator,
) | [
"def",
"get_route53_client",
"(",
"agent",
",",
"region",
",",
"cooperator",
"=",
"None",
")",
":",
"if",
"cooperator",
"is",
"None",
":",
"cooperator",
"=",
"task",
"return",
"region",
".",
"get_client",
"(",
"_Route53Client",
",",
"agent",
"=",
"agent",
... | Get a non-registration Route53 client. | [
"Get",
"a",
"non",
"-",
"registration",
"Route53",
"client",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/route53/client.py#L60-L73 | train | 36,883 |
twisted/txaws | txaws/server/registry.py | Registry.add | def add(self, method_class, action, version=None):
"""Add a method class to the regitry.
@param method_class: The method class to add
@param action: The action that the method class can handle
@param version: The version that the method class can handle
"""
by_version = self._by_action.setdefault(action, {})
if version in by_version:
raise RuntimeError("A method was already registered for action"
" %s in version %s" % (action, version))
by_version[version] = method_class | python | def add(self, method_class, action, version=None):
"""Add a method class to the regitry.
@param method_class: The method class to add
@param action: The action that the method class can handle
@param version: The version that the method class can handle
"""
by_version = self._by_action.setdefault(action, {})
if version in by_version:
raise RuntimeError("A method was already registered for action"
" %s in version %s" % (action, version))
by_version[version] = method_class | [
"def",
"add",
"(",
"self",
",",
"method_class",
",",
"action",
",",
"version",
"=",
"None",
")",
":",
"by_version",
"=",
"self",
".",
"_by_action",
".",
"setdefault",
"(",
"action",
",",
"{",
"}",
")",
"if",
"version",
"in",
"by_version",
":",
"raise",... | Add a method class to the regitry.
@param method_class: The method class to add
@param action: The action that the method class can handle
@param version: The version that the method class can handle | [
"Add",
"a",
"method",
"class",
"to",
"the",
"regitry",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/registry.py#L10-L21 | train | 36,884 |
twisted/txaws | txaws/server/registry.py | Registry.check | def check(self, action, version=None):
"""Check if the given action is supported in the given version.
@raises APIError: If there's no method class registered for handling
the given action or version.
"""
if action not in self._by_action:
raise APIError(400, "InvalidAction", "The action %s is not valid "
"for this web service." % action)
by_version = self._by_action[action]
if None not in by_version:
# There's no catch-all method, let's try the version-specific one
if version not in by_version:
raise APIError(400, "InvalidVersion", "Invalid API version.") | python | def check(self, action, version=None):
"""Check if the given action is supported in the given version.
@raises APIError: If there's no method class registered for handling
the given action or version.
"""
if action not in self._by_action:
raise APIError(400, "InvalidAction", "The action %s is not valid "
"for this web service." % action)
by_version = self._by_action[action]
if None not in by_version:
# There's no catch-all method, let's try the version-specific one
if version not in by_version:
raise APIError(400, "InvalidVersion", "Invalid API version.") | [
"def",
"check",
"(",
"self",
",",
"action",
",",
"version",
"=",
"None",
")",
":",
"if",
"action",
"not",
"in",
"self",
".",
"_by_action",
":",
"raise",
"APIError",
"(",
"400",
",",
"\"InvalidAction\"",
",",
"\"The action %s is not valid \"",
"\"for this web s... | Check if the given action is supported in the given version.
@raises APIError: If there's no method class registered for handling
the given action or version. | [
"Check",
"if",
"the",
"given",
"action",
"is",
"supported",
"in",
"the",
"given",
"version",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/registry.py#L23-L36 | train | 36,885 |
twisted/txaws | txaws/server/registry.py | Registry.get | def get(self, action, version=None):
"""Get the method class handing the given action and version."""
by_version = self._by_action[action]
if version in by_version:
return by_version[version]
else:
return by_version[None] | python | def get(self, action, version=None):
"""Get the method class handing the given action and version."""
by_version = self._by_action[action]
if version in by_version:
return by_version[version]
else:
return by_version[None] | [
"def",
"get",
"(",
"self",
",",
"action",
",",
"version",
"=",
"None",
")",
":",
"by_version",
"=",
"self",
".",
"_by_action",
"[",
"action",
"]",
"if",
"version",
"in",
"by_version",
":",
"return",
"by_version",
"[",
"version",
"]",
"else",
":",
"retu... | Get the method class handing the given action and version. | [
"Get",
"the",
"method",
"class",
"handing",
"the",
"given",
"action",
"and",
"version",
"."
] | 5c3317376cd47e536625027e38c3b37840175ce0 | https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/registry.py#L38-L44 | train | 36,886 |
T-002/pycast | pycast/methods/regression.py | Regression.calculate_parameters | def calculate_parameters(self, independentTs, dependentTs):
"""Calculate and return the parameters for the regression line
Return the parameter for the line describing the relationship
between the input variables.
:param Timeseries independentTs: The Timeseries used for the
independent variable (x-axis). The Timeseries must have
at least 2 datapoints with different dates and values
:param Timeseries dependentTs: The Timeseries used as the
dependent variable (y-axis). The Timeseries must have
at least 2 datapoints, which dates match with independentTs
:return: A tuple containing the y-axis intercept and the slope
used to execute the regression
:rtype: tuple
:raise: Raises an :py:exc:`ValueError` if
- independentTs and dependentTs have not at least two matching dates
- independentTs has only one distinct value
- The dates in one or both Timeseries are not distinct.
"""
listX, listY = self.match_time_series(independentTs, dependentTs)
if len(listX) == 0 or len(listY) == 0:
raise ValueError("Lists need to have some equal dates or cannot be empty")
if len(listX) != len(listY):
raise ValueError("Each Timeseries need to have distinct dates")
xValues = map(lambda item: item[1], listX)
yValues = map(lambda item: item[1], listY)
xMean = FusionMethods["mean"](xValues)
yMean = FusionMethods["mean"](yValues)
xDeviation = map(lambda item: (item - xMean), xValues)
yDeviation = map(lambda item: (item - yMean), yValues)
try:
parameter1 = sum(x * y for x, y in zip(xDeviation, yDeviation)) / sum(x * x for x in xDeviation)
except ZeroDivisionError:
# error occures if xDeviation is always 0, which means that all x values are the same
raise ValueError("Not enough distinct x values")
parameter0 = yMean - (parameter1 * xMean)
return (parameter0, parameter1) | python | def calculate_parameters(self, independentTs, dependentTs):
"""Calculate and return the parameters for the regression line
Return the parameter for the line describing the relationship
between the input variables.
:param Timeseries independentTs: The Timeseries used for the
independent variable (x-axis). The Timeseries must have
at least 2 datapoints with different dates and values
:param Timeseries dependentTs: The Timeseries used as the
dependent variable (y-axis). The Timeseries must have
at least 2 datapoints, which dates match with independentTs
:return: A tuple containing the y-axis intercept and the slope
used to execute the regression
:rtype: tuple
:raise: Raises an :py:exc:`ValueError` if
- independentTs and dependentTs have not at least two matching dates
- independentTs has only one distinct value
- The dates in one or both Timeseries are not distinct.
"""
listX, listY = self.match_time_series(independentTs, dependentTs)
if len(listX) == 0 or len(listY) == 0:
raise ValueError("Lists need to have some equal dates or cannot be empty")
if len(listX) != len(listY):
raise ValueError("Each Timeseries need to have distinct dates")
xValues = map(lambda item: item[1], listX)
yValues = map(lambda item: item[1], listY)
xMean = FusionMethods["mean"](xValues)
yMean = FusionMethods["mean"](yValues)
xDeviation = map(lambda item: (item - xMean), xValues)
yDeviation = map(lambda item: (item - yMean), yValues)
try:
parameter1 = sum(x * y for x, y in zip(xDeviation, yDeviation)) / sum(x * x for x in xDeviation)
except ZeroDivisionError:
# error occures if xDeviation is always 0, which means that all x values are the same
raise ValueError("Not enough distinct x values")
parameter0 = yMean - (parameter1 * xMean)
return (parameter0, parameter1) | [
"def",
"calculate_parameters",
"(",
"self",
",",
"independentTs",
",",
"dependentTs",
")",
":",
"listX",
",",
"listY",
"=",
"self",
".",
"match_time_series",
"(",
"independentTs",
",",
"dependentTs",
")",
"if",
"len",
"(",
"listX",
")",
"==",
"0",
"or",
"l... | Calculate and return the parameters for the regression line
Return the parameter for the line describing the relationship
between the input variables.
:param Timeseries independentTs: The Timeseries used for the
independent variable (x-axis). The Timeseries must have
at least 2 datapoints with different dates and values
:param Timeseries dependentTs: The Timeseries used as the
dependent variable (y-axis). The Timeseries must have
at least 2 datapoints, which dates match with independentTs
:return: A tuple containing the y-axis intercept and the slope
used to execute the regression
:rtype: tuple
:raise: Raises an :py:exc:`ValueError` if
- independentTs and dependentTs have not at least two matching dates
- independentTs has only one distinct value
- The dates in one or both Timeseries are not distinct. | [
"Calculate",
"and",
"return",
"the",
"parameters",
"for",
"the",
"regression",
"line"
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/regression.py#L35-L79 | train | 36,887 |
T-002/pycast | pycast/methods/regression.py | Regression.calculate_parameters_with_confidence | def calculate_parameters_with_confidence(self, independentTs, dependentTs, confidenceLevel, samplePercentage=.1):
"""Same functionality as calculate_parameters, just that additionally
the confidence interval for a given confidenceLevel is calculated.
This is done based on a sample of the dependentTs training data that is validated
against the prediction. The signed error of the predictions and the sample is then
used to calculate the bounds of the interval.
further reading: http://en.wikipedia.org/wiki/Confidence_interval
:param Timeseries independentTs: The Timeseries used for the
independent variable (x-axis). The Timeseries must have
at least 2 datapoints with different dates and values
:param Timeseries dependentTs: The Timeseries used as the
dependent variable (y-axis). The Timeseries must have
at least 2 datapoints, which dates match with independentTs
:param float confidenceLevel: The percentage of entries in the sample that should
have an prediction error closer or equal to 0 than the bounds of the confidence interval.
:param float samplePercentage: How much of the dependentTs should be used for sampling
:return: A tuple containing the y-axis intercept and the slope
used to execute the regression and the (underestimation, overestimation)
for the given confidenceLevel
:rtype: tuple
:raise: Raises an :py:exc:`ValueError` if
- independentTs and dependentTs have not at least two matching dates
- independentTs has only one distinct value
- The dates in one or both Timeseries are not distinct.
"""
#First split the time series into sample and training data
sampleY, trainingY = dependentTs.sample(samplePercentage)
sampleX_list = self.match_time_series(sampleY, independentTs)[1]
trainingX_list = self.match_time_series(trainingY, independentTs)[1]
sampleX = TimeSeries.from_twodim_list(sampleX_list)
trainingX = TimeSeries.from_twodim_list(trainingX_list)
#Then calculate parameters based on the training data
n, m = self.calculate_parameters(trainingX, trainingY)
#predict
prediction = self.predict(sampleX, n, m)
#calculate the signed error at each location, note that MSD(x,y) != MSD(y,x)
msd = MSD()
msd.initialize(prediction, sampleY)
return (n, m, msd.confidence_interval(confidenceLevel)) | python | def calculate_parameters_with_confidence(self, independentTs, dependentTs, confidenceLevel, samplePercentage=.1):
"""Same functionality as calculate_parameters, just that additionally
the confidence interval for a given confidenceLevel is calculated.
This is done based on a sample of the dependentTs training data that is validated
against the prediction. The signed error of the predictions and the sample is then
used to calculate the bounds of the interval.
further reading: http://en.wikipedia.org/wiki/Confidence_interval
:param Timeseries independentTs: The Timeseries used for the
independent variable (x-axis). The Timeseries must have
at least 2 datapoints with different dates and values
:param Timeseries dependentTs: The Timeseries used as the
dependent variable (y-axis). The Timeseries must have
at least 2 datapoints, which dates match with independentTs
:param float confidenceLevel: The percentage of entries in the sample that should
have an prediction error closer or equal to 0 than the bounds of the confidence interval.
:param float samplePercentage: How much of the dependentTs should be used for sampling
:return: A tuple containing the y-axis intercept and the slope
used to execute the regression and the (underestimation, overestimation)
for the given confidenceLevel
:rtype: tuple
:raise: Raises an :py:exc:`ValueError` if
- independentTs and dependentTs have not at least two matching dates
- independentTs has only one distinct value
- The dates in one or both Timeseries are not distinct.
"""
#First split the time series into sample and training data
sampleY, trainingY = dependentTs.sample(samplePercentage)
sampleX_list = self.match_time_series(sampleY, independentTs)[1]
trainingX_list = self.match_time_series(trainingY, independentTs)[1]
sampleX = TimeSeries.from_twodim_list(sampleX_list)
trainingX = TimeSeries.from_twodim_list(trainingX_list)
#Then calculate parameters based on the training data
n, m = self.calculate_parameters(trainingX, trainingY)
#predict
prediction = self.predict(sampleX, n, m)
#calculate the signed error at each location, note that MSD(x,y) != MSD(y,x)
msd = MSD()
msd.initialize(prediction, sampleY)
return (n, m, msd.confidence_interval(confidenceLevel)) | [
"def",
"calculate_parameters_with_confidence",
"(",
"self",
",",
"independentTs",
",",
"dependentTs",
",",
"confidenceLevel",
",",
"samplePercentage",
"=",
".1",
")",
":",
"#First split the time series into sample and training data",
"sampleY",
",",
"trainingY",
"=",
"depen... | Same functionality as calculate_parameters, just that additionally
the confidence interval for a given confidenceLevel is calculated.
This is done based on a sample of the dependentTs training data that is validated
against the prediction. The signed error of the predictions and the sample is then
used to calculate the bounds of the interval.
further reading: http://en.wikipedia.org/wiki/Confidence_interval
:param Timeseries independentTs: The Timeseries used for the
independent variable (x-axis). The Timeseries must have
at least 2 datapoints with different dates and values
:param Timeseries dependentTs: The Timeseries used as the
dependent variable (y-axis). The Timeseries must have
at least 2 datapoints, which dates match with independentTs
:param float confidenceLevel: The percentage of entries in the sample that should
have an prediction error closer or equal to 0 than the bounds of the confidence interval.
:param float samplePercentage: How much of the dependentTs should be used for sampling
:return: A tuple containing the y-axis intercept and the slope
used to execute the regression and the (underestimation, overestimation)
for the given confidenceLevel
:rtype: tuple
:raise: Raises an :py:exc:`ValueError` if
- independentTs and dependentTs have not at least two matching dates
- independentTs has only one distinct value
- The dates in one or both Timeseries are not distinct. | [
"Same",
"functionality",
"as",
"calculate_parameters",
"just",
"that",
"additionally",
"the",
"confidence",
"interval",
"for",
"a",
"given",
"confidenceLevel",
"is",
"calculated",
".",
"This",
"is",
"done",
"based",
"on",
"a",
"sample",
"of",
"the",
"dependentTs",... | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/regression.py#L81-L128 | train | 36,888 |
T-002/pycast | pycast/methods/regression.py | Regression.match_time_series | def match_time_series(self, timeseries1, timeseries2):
"""Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List
"""
time1 = map(lambda item: item[0], timeseries1.to_twodim_list())
time2 = map(lambda item: item[0], timeseries2.to_twodim_list())
matches = filter(lambda x: (x in time1), time2)
listX = filter(lambda x: (x[0] in matches), timeseries1.to_twodim_list())
listY = filter(lambda x: (x[0] in matches), timeseries2.to_twodim_list())
return listX, listY | python | def match_time_series(self, timeseries1, timeseries2):
"""Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List
"""
time1 = map(lambda item: item[0], timeseries1.to_twodim_list())
time2 = map(lambda item: item[0], timeseries2.to_twodim_list())
matches = filter(lambda x: (x in time1), time2)
listX = filter(lambda x: (x[0] in matches), timeseries1.to_twodim_list())
listY = filter(lambda x: (x[0] in matches), timeseries2.to_twodim_list())
return listX, listY | [
"def",
"match_time_series",
"(",
"self",
",",
"timeseries1",
",",
"timeseries2",
")",
":",
"time1",
"=",
"map",
"(",
"lambda",
"item",
":",
"item",
"[",
"0",
"]",
",",
"timeseries1",
".",
"to_twodim_list",
"(",
")",
")",
"time2",
"=",
"map",
"(",
"lamb... | Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List | [
"Return",
"two",
"lists",
"of",
"the",
"two",
"input",
"time",
"series",
"with",
"matching",
"dates"
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/regression.py#L153-L168 | train | 36,889 |
T-002/pycast | pycast/methods/regression.py | LinearRegression.lstsq | def lstsq(cls, a, b):
"""Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b.
"""
# Check if the size of the input matrices matches
if a.get_height() != b.get_height():
raise ValueError("Size of input matrices does not match")
if b.get_width() != 1:
raise ValueError("Matrix with dependent variable has more than 1 column")
aPseudo = a.pseudoinverse()
# The following code could be used if c is regular.
# aTrans = a.transform()
# c = aTrans * a
# invers() raises an ValueError, if c is not invertible
# cInvers = c.invers()
# beta = cInvers * aTrans * b
beta = aPseudo * b
return beta | python | def lstsq(cls, a, b):
"""Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b.
"""
# Check if the size of the input matrices matches
if a.get_height() != b.get_height():
raise ValueError("Size of input matrices does not match")
if b.get_width() != 1:
raise ValueError("Matrix with dependent variable has more than 1 column")
aPseudo = a.pseudoinverse()
# The following code could be used if c is regular.
# aTrans = a.transform()
# c = aTrans * a
# invers() raises an ValueError, if c is not invertible
# cInvers = c.invers()
# beta = cInvers * aTrans * b
beta = aPseudo * b
return beta | [
"def",
"lstsq",
"(",
"cls",
",",
"a",
",",
"b",
")",
":",
"# Check if the size of the input matrices matches",
"if",
"a",
".",
"get_height",
"(",
")",
"!=",
"b",
".",
"get_height",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Size of input matrices does not mat... | Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b. | [
"Return",
"the",
"least",
"-",
"squares",
"solution",
"to",
"a",
"linear",
"matrix",
"equation",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/regression.py#L174-L200 | train | 36,890 |
T-002/pycast | pycast/errors/meanabsolutescalederror.py | MeanAbsoluteScaledError._get_historic_means | def _get_historic_means(self, timeSeries):
"""Calculates the mean value for the history of the MeanAbsoluteScaledError.
:param TimeSeries timeSeries: Original TimeSeries used to calculate the mean historic values.
:return: Returns a list containing the historic means.
:rtype: list
"""
# calculate the history values
historyLength = self._historyLength
historicMeans = []
append = historicMeans.append
# not most optimized loop in case of calculation operations
for startIdx in xrange(len(timeSeries) - historyLength - 1):
value = 0
for idx in xrange(startIdx, startIdx + historyLength):
value += abs(timeSeries[idx+1][1] - timeSeries[idx][1])
append(value / float(historyLength))
return historicMeans | python | def _get_historic_means(self, timeSeries):
"""Calculates the mean value for the history of the MeanAbsoluteScaledError.
:param TimeSeries timeSeries: Original TimeSeries used to calculate the mean historic values.
:return: Returns a list containing the historic means.
:rtype: list
"""
# calculate the history values
historyLength = self._historyLength
historicMeans = []
append = historicMeans.append
# not most optimized loop in case of calculation operations
for startIdx in xrange(len(timeSeries) - historyLength - 1):
value = 0
for idx in xrange(startIdx, startIdx + historyLength):
value += abs(timeSeries[idx+1][1] - timeSeries[idx][1])
append(value / float(historyLength))
return historicMeans | [
"def",
"_get_historic_means",
"(",
"self",
",",
"timeSeries",
")",
":",
"# calculate the history values",
"historyLength",
"=",
"self",
".",
"_historyLength",
"historicMeans",
"=",
"[",
"]",
"append",
"=",
"historicMeans",
".",
"append",
"# not most optimized loop in ca... | Calculates the mean value for the history of the MeanAbsoluteScaledError.
:param TimeSeries timeSeries: Original TimeSeries used to calculate the mean historic values.
:return: Returns a list containing the historic means.
:rtype: list | [
"Calculates",
"the",
"mean",
"value",
"for",
"the",
"history",
"of",
"the",
"MeanAbsoluteScaledError",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/errors/meanabsolutescalederror.py#L63-L84 | train | 36,891 |
pdkit/pdkit | pdkit/updrs.py | UPDRS.write_model | def write_model(self, filename='scores', filepath='', output_format='csv'):
"""
This method calculates the scores and writes them to a file the data frame received. If the output format
is other than 'csv' it will print the scores.
:param filename: the name to give to the file
:type filename: string
:param filepath: the path to save the file
:type filepath: string
:param output_format: the format of the file to write ('csv')
:type output_format: string
"""
scores_array = np.array([])
for obs in self.observations:
c, sd = self.__get_centroids_sd(obs)
points, ids = self.__get_features_for_observation(observation=obs, last_column_is_id=True)
b = np.array([])
for p in points:
b = np.append(b, [self.get_single_score(p, centroids=c, sd=sd)])
scores_array = np.vstack([scores_array, b]) if scores_array.size else b
scores_array = np.concatenate((ids[:, np.newaxis], scores_array.transpose()), axis=1)
header = 'id,'+','.join(self.observations)
try:
if output_format == 'csv':
filename = join(filepath, filename) + '.' + output_format
np.savetxt(filename, scores_array, delimiter=",", fmt='%i', header=header,comments='')
else:
print(scores_array)
except:
logging.error("Unexpected error on writing output") | python | def write_model(self, filename='scores', filepath='', output_format='csv'):
"""
This method calculates the scores and writes them to a file the data frame received. If the output format
is other than 'csv' it will print the scores.
:param filename: the name to give to the file
:type filename: string
:param filepath: the path to save the file
:type filepath: string
:param output_format: the format of the file to write ('csv')
:type output_format: string
"""
scores_array = np.array([])
for obs in self.observations:
c, sd = self.__get_centroids_sd(obs)
points, ids = self.__get_features_for_observation(observation=obs, last_column_is_id=True)
b = np.array([])
for p in points:
b = np.append(b, [self.get_single_score(p, centroids=c, sd=sd)])
scores_array = np.vstack([scores_array, b]) if scores_array.size else b
scores_array = np.concatenate((ids[:, np.newaxis], scores_array.transpose()), axis=1)
header = 'id,'+','.join(self.observations)
try:
if output_format == 'csv':
filename = join(filepath, filename) + '.' + output_format
np.savetxt(filename, scores_array, delimiter=",", fmt='%i', header=header,comments='')
else:
print(scores_array)
except:
logging.error("Unexpected error on writing output") | [
"def",
"write_model",
"(",
"self",
",",
"filename",
"=",
"'scores'",
",",
"filepath",
"=",
"''",
",",
"output_format",
"=",
"'csv'",
")",
":",
"scores_array",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"for",
"obs",
"in",
"self",
".",
"observations",... | This method calculates the scores and writes them to a file the data frame received. If the output format
is other than 'csv' it will print the scores.
:param filename: the name to give to the file
:type filename: string
:param filepath: the path to save the file
:type filepath: string
:param output_format: the format of the file to write ('csv')
:type output_format: string | [
"This",
"method",
"calculates",
"the",
"scores",
"and",
"writes",
"them",
"to",
"a",
"file",
"the",
"data",
"frame",
"received",
".",
"If",
"the",
"output",
"format",
"is",
"other",
"than",
"csv",
"it",
"will",
"print",
"the",
"scores",
"."
] | c7120263da2071bb139815fbdb56ca77b544f340 | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/updrs.py#L244-L277 | train | 36,892 |
Duke-GCB/DukeDSClient | ddsc/core/download.py | ProjectDownload.run | def run(self):
"""
Download the contents of the specified project name or id to dest_directory.
"""
files_to_download = self.get_files_to_download()
total_files_size = self.get_total_files_size(files_to_download)
if self.file_download_pre_processor:
self.run_preprocessor(files_to_download)
self.try_create_dir(self.dest_directory)
watcher = ProgressPrinter(total_files_size, msg_verb='downloading')
self.download_files(files_to_download, watcher)
watcher.finished()
warnings = self.check_warnings()
if warnings:
watcher.show_warning(warnings) | python | def run(self):
"""
Download the contents of the specified project name or id to dest_directory.
"""
files_to_download = self.get_files_to_download()
total_files_size = self.get_total_files_size(files_to_download)
if self.file_download_pre_processor:
self.run_preprocessor(files_to_download)
self.try_create_dir(self.dest_directory)
watcher = ProgressPrinter(total_files_size, msg_verb='downloading')
self.download_files(files_to_download, watcher)
watcher.finished()
warnings = self.check_warnings()
if warnings:
watcher.show_warning(warnings) | [
"def",
"run",
"(",
"self",
")",
":",
"files_to_download",
"=",
"self",
".",
"get_files_to_download",
"(",
")",
"total_files_size",
"=",
"self",
".",
"get_total_files_size",
"(",
"files_to_download",
")",
"if",
"self",
".",
"file_download_pre_processor",
":",
"self... | Download the contents of the specified project name or id to dest_directory. | [
"Download",
"the",
"contents",
"of",
"the",
"specified",
"project",
"name",
"or",
"id",
"to",
"dest_directory",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L38-L54 | train | 36,893 |
Duke-GCB/DukeDSClient | ddsc/core/download.py | FileUrlDownloader.make_local_directories | def make_local_directories(self):
"""
Create directories necessary to download the files into dest_directory
"""
for remote_path in self._get_parent_remote_paths():
local_path = os.path.join(self.dest_directory, remote_path)
self._assure_dir_exists(local_path) | python | def make_local_directories(self):
"""
Create directories necessary to download the files into dest_directory
"""
for remote_path in self._get_parent_remote_paths():
local_path = os.path.join(self.dest_directory, remote_path)
self._assure_dir_exists(local_path) | [
"def",
"make_local_directories",
"(",
"self",
")",
":",
"for",
"remote_path",
"in",
"self",
".",
"_get_parent_remote_paths",
"(",
")",
":",
"local_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dest_directory",
",",
"remote_path",
")",
"self"... | Create directories necessary to download the files into dest_directory | [
"Create",
"directories",
"necessary",
"to",
"download",
"the",
"files",
"into",
"dest_directory"
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L197-L203 | train | 36,894 |
Duke-GCB/DukeDSClient | ddsc/core/download.py | FileUrlDownloader.make_big_empty_files | def make_big_empty_files(self):
"""
Write out a empty file so the workers can seek to where they should write and write their data.
"""
for file_url in self.file_urls:
local_path = file_url.get_local_path(self.dest_directory)
with open(local_path, "wb") as outfile:
if file_url.size > 0:
outfile.seek(int(file_url.size) - 1)
outfile.write(b'\0') | python | def make_big_empty_files(self):
"""
Write out a empty file so the workers can seek to where they should write and write their data.
"""
for file_url in self.file_urls:
local_path = file_url.get_local_path(self.dest_directory)
with open(local_path, "wb") as outfile:
if file_url.size > 0:
outfile.seek(int(file_url.size) - 1)
outfile.write(b'\0') | [
"def",
"make_big_empty_files",
"(",
"self",
")",
":",
"for",
"file_url",
"in",
"self",
".",
"file_urls",
":",
"local_path",
"=",
"file_url",
".",
"get_local_path",
"(",
"self",
".",
"dest_directory",
")",
"with",
"open",
"(",
"local_path",
",",
"\"wb\"",
")"... | Write out a empty file so the workers can seek to where they should write and write their data. | [
"Write",
"out",
"a",
"empty",
"file",
"so",
"the",
"workers",
"can",
"seek",
"to",
"where",
"they",
"should",
"write",
"and",
"write",
"their",
"data",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L205-L214 | train | 36,895 |
Duke-GCB/DukeDSClient | ddsc/core/download.py | FileUrlDownloader.check_downloaded_files_sizes | def check_downloaded_files_sizes(self):
"""
Make sure the files sizes are correct. Since we manually create the files this will only catch overruns.
Raises ValueError if there is a problematic file.
"""
for file_url in self.file_urls:
local_path = file_url.get_local_path(self.dest_directory)
self.check_file_size(file_url.size, local_path) | python | def check_downloaded_files_sizes(self):
"""
Make sure the files sizes are correct. Since we manually create the files this will only catch overruns.
Raises ValueError if there is a problematic file.
"""
for file_url in self.file_urls:
local_path = file_url.get_local_path(self.dest_directory)
self.check_file_size(file_url.size, local_path) | [
"def",
"check_downloaded_files_sizes",
"(",
"self",
")",
":",
"for",
"file_url",
"in",
"self",
".",
"file_urls",
":",
"local_path",
"=",
"file_url",
".",
"get_local_path",
"(",
"self",
".",
"dest_directory",
")",
"self",
".",
"check_file_size",
"(",
"file_url",
... | Make sure the files sizes are correct. Since we manually create the files this will only catch overruns.
Raises ValueError if there is a problematic file. | [
"Make",
"sure",
"the",
"files",
"sizes",
"are",
"correct",
".",
"Since",
"we",
"manually",
"create",
"the",
"files",
"this",
"will",
"only",
"catch",
"overruns",
".",
"Raises",
"ValueError",
"if",
"there",
"is",
"a",
"problematic",
"file",
"."
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L299-L306 | train | 36,896 |
Duke-GCB/DukeDSClient | ddsc/core/download.py | RetryChunkDownloader._verify_download_complete | def _verify_download_complete(self):
"""
Make sure we received all the data
"""
if self.actual_bytes_read > self.bytes_to_read:
raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path)
elif self.actual_bytes_read < self.bytes_to_read:
raise PartialChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path) | python | def _verify_download_complete(self):
"""
Make sure we received all the data
"""
if self.actual_bytes_read > self.bytes_to_read:
raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path)
elif self.actual_bytes_read < self.bytes_to_read:
raise PartialChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path) | [
"def",
"_verify_download_complete",
"(",
"self",
")",
":",
"if",
"self",
".",
"actual_bytes_read",
">",
"self",
".",
"bytes_to_read",
":",
"raise",
"TooLargeChunkDownloadError",
"(",
"self",
".",
"actual_bytes_read",
",",
"self",
".",
"bytes_to_read",
",",
"self",... | Make sure we received all the data | [
"Make",
"sure",
"we",
"received",
"all",
"the",
"data"
] | 117f68fb9bae82e4c81ea487ad5d61ac350f3726 | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L474-L481 | train | 36,897 |
T-002/pycast | pycast/optimization/gridsearch.py | GridSearch.optimize | def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0):
"""Runs the optimization of the given TimeSeries.
:param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
:param list forecastingMethods: List of forecastingMethods that will be used for optimization.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:return: Returns the optimized forecasting method, the corresponding error measure and the forecasting methods
parameters.
:rtype: [BaseForecastingMethod, BaseErrorMeasure, Dictionary]
:raise: Raises a :py:exc:`ValueError` ValueError if no forecastingMethods is empty.
"""
if forecastingMethods is None or len(forecastingMethods) == 0:
raise ValueError("forecastingMethods cannot be empty.")
self._startingPercentage = startingPercentage
self._endPercentage = endPercentage
results = []
for forecastingMethod in forecastingMethods:
results.append([forecastingMethod] + self.optimize_forecasting_method(timeSeries, forecastingMethod))
# get the forecasting method with the smallest error
bestForecastingMethod = min(results, key=lambda item: item[1].get_error(self._startingPercentage, self._endPercentage))
for parameter in bestForecastingMethod[2]:
bestForecastingMethod[0].set_parameter(parameter, bestForecastingMethod[2][parameter])
return bestForecastingMethod | python | def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0):
"""Runs the optimization of the given TimeSeries.
:param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
:param list forecastingMethods: List of forecastingMethods that will be used for optimization.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:return: Returns the optimized forecasting method, the corresponding error measure and the forecasting methods
parameters.
:rtype: [BaseForecastingMethod, BaseErrorMeasure, Dictionary]
:raise: Raises a :py:exc:`ValueError` ValueError if no forecastingMethods is empty.
"""
if forecastingMethods is None or len(forecastingMethods) == 0:
raise ValueError("forecastingMethods cannot be empty.")
self._startingPercentage = startingPercentage
self._endPercentage = endPercentage
results = []
for forecastingMethod in forecastingMethods:
results.append([forecastingMethod] + self.optimize_forecasting_method(timeSeries, forecastingMethod))
# get the forecasting method with the smallest error
bestForecastingMethod = min(results, key=lambda item: item[1].get_error(self._startingPercentage, self._endPercentage))
for parameter in bestForecastingMethod[2]:
bestForecastingMethod[0].set_parameter(parameter, bestForecastingMethod[2][parameter])
return bestForecastingMethod | [
"def",
"optimize",
"(",
"self",
",",
"timeSeries",
",",
"forecastingMethods",
"=",
"None",
",",
"startingPercentage",
"=",
"0.0",
",",
"endPercentage",
"=",
"100.0",
")",
":",
"if",
"forecastingMethods",
"is",
"None",
"or",
"len",
"(",
"forecastingMethods",
")... | Runs the optimization of the given TimeSeries.
:param TimeSeries timeSeries: TimeSeries instance that requires an optimized forecast.
:param list forecastingMethods: List of forecastingMethods that will be used for optimization.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:return: Returns the optimized forecasting method, the corresponding error measure and the forecasting methods
parameters.
:rtype: [BaseForecastingMethod, BaseErrorMeasure, Dictionary]
:raise: Raises a :py:exc:`ValueError` ValueError if no forecastingMethods is empty. | [
"Runs",
"the",
"optimization",
"of",
"the",
"given",
"TimeSeries",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/optimization/gridsearch.py#L34-L69 | train | 36,898 |
T-002/pycast | pycast/optimization/gridsearch.py | GridSearch._generate_next_parameter_value | def _generate_next_parameter_value(self, parameter, forecastingMethod):
"""Generator for a specific parameter of the given forecasting method.
:param string parameter: Name of the parameter the generator is used for.
:param BaseForecastingMethod forecastingMethod: Instance of a ForecastingMethod.
:return: Creates a generator used to iterate over possible parameters.
:rtype: generator
"""
interval = forecastingMethod.get_interval(parameter)
precision = 10**self._precison
startValue = interval[0]
endValue = interval[1]
if not interval[2]:
startValue += precision
if interval[3]:
endValue += precision
while startValue < endValue:
# fix the parameter precision
parameterValue = startValue
yield parameterValue
startValue += precision | python | def _generate_next_parameter_value(self, parameter, forecastingMethod):
"""Generator for a specific parameter of the given forecasting method.
:param string parameter: Name of the parameter the generator is used for.
:param BaseForecastingMethod forecastingMethod: Instance of a ForecastingMethod.
:return: Creates a generator used to iterate over possible parameters.
:rtype: generator
"""
interval = forecastingMethod.get_interval(parameter)
precision = 10**self._precison
startValue = interval[0]
endValue = interval[1]
if not interval[2]:
startValue += precision
if interval[3]:
endValue += precision
while startValue < endValue:
# fix the parameter precision
parameterValue = startValue
yield parameterValue
startValue += precision | [
"def",
"_generate_next_parameter_value",
"(",
"self",
",",
"parameter",
",",
"forecastingMethod",
")",
":",
"interval",
"=",
"forecastingMethod",
".",
"get_interval",
"(",
"parameter",
")",
"precision",
"=",
"10",
"**",
"self",
".",
"_precison",
"startValue",
"=",... | Generator for a specific parameter of the given forecasting method.
:param string parameter: Name of the parameter the generator is used for.
:param BaseForecastingMethod forecastingMethod: Instance of a ForecastingMethod.
:return: Creates a generator used to iterate over possible parameters.
:rtype: generator | [
"Generator",
"for",
"a",
"specific",
"parameter",
"of",
"the",
"given",
"forecasting",
"method",
"."
] | 8a53505c6d8367e0ea572e8af768e80b29e1cc41 | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/optimization/gridsearch.py#L72-L98 | train | 36,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.