| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition | idx |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
phaethon/kamene | kamene/pton_ntop.py | inet_ntop | def inet_ntop(af, addr):
"""Convert an IP address from binary form into text representation"""
if af == socket.AF_INET:
return inet_ntoa(addr)
elif af == socket.AF_INET6:
# IPv6 addresses have 128 bits (16 bytes)
if len(addr) != 16:
raise Exception("Illegal syntax for IP address")
parts = []
for left in [0, 2, 4, 6, 8, 10, 12, 14]:
try:
value = struct.unpack("!H", addr[left:left+2])[0]
hexstr = hex(value)[2:]
except TypeError:
raise Exception("Illegal syntax for IP address")
parts.append(hexstr.lstrip("0").lower().encode())  # encode: the b":".join() below works on bytes
result = b":".join(parts)
while b":::" in result:
result = result.replace(b":::", b"::")
# Leaving out leading and trailing zeros is only allowed with ::
if result.endswith(b":") and not result.endswith(b"::"):
result = result + b"0"
if result.startswith(b":") and not result.startswith(b"::"):
result = b"0" + result
return result
else:
raise Exception("Address family not supported yet") | python | def inet_ntop(af, addr):
"""Convert an IP address from binary form into text representation"""
if af == socket.AF_INET:
return inet_ntoa(addr)
elif af == socket.AF_INET6:
# IPv6 addresses have 128 bits (16 bytes)
if len(addr) != 16:
raise Exception("Illegal syntax for IP address")
parts = []
for left in [0, 2, 4, 6, 8, 10, 12, 14]:
try:
value = struct.unpack("!H", addr[left:left+2])[0]
hexstr = hex(value)[2:]
except TypeError:
raise Exception("Illegal syntax for IP address")
parts.append(hexstr.lstrip("0").lower().encode())  # encode: the b":".join() below works on bytes
result = b":".join(parts)
while b":::" in result:
result = result.replace(b":::", b"::")
# Leaving out leading and trailing zeros is only allowed with ::
if result.endswith(b":") and not result.endswith(b"::"):
result = result + b"0"
if result.startswith(b":") and not result.startswith(b"::"):
result = b"0" + result
return result
else:
raise Exception("Address family not supported yet") | [
"def",
"inet_ntop",
"(",
"af",
",",
"addr",
")",
":",
"if",
"af",
"==",
"socket",
".",
"AF_INET",
":",
"return",
"inet_ntoa",
"(",
"addr",
")",
"elif",
"af",
"==",
"socket",
".",
"AF_INET6",
":",
"# IPv6 addresses have 128bits (16 bytes)",
"if",
"len",
"(",
"addr",
")",
"!=",
"16",
":",
"raise",
"Exception",
"(",
"\"Illegal syntax for IP address\"",
")",
"parts",
"=",
"[",
"]",
"for",
"left",
"in",
"[",
"0",
",",
"2",
",",
"4",
",",
"6",
",",
"8",
",",
"10",
",",
"12",
",",
"14",
"]",
":",
"try",
":",
"value",
"=",
"struct",
".",
"unpack",
"(",
"\"!H\"",
",",
"addr",
"[",
"left",
":",
"left",
"+",
"2",
"]",
")",
"[",
"0",
"]",
"hexstr",
"=",
"hex",
"(",
"value",
")",
"[",
"2",
":",
"]",
"except",
"TypeError",
":",
"raise",
"Exception",
"(",
"\"Illegal syntax for IP address\"",
")",
"parts",
".",
"append",
"(",
"hexstr",
".",
"lstrip",
"(",
"\"0\"",
")",
".",
"lower",
"(",
")",
")",
"result",
"=",
"b\":\"",
".",
"join",
"(",
"parts",
")",
"while",
"b\":::\"",
"in",
"result",
":",
"result",
"=",
"result",
".",
"replace",
"(",
"b\":::\"",
",",
"b\"::\"",
")",
"# Leaving out leading and trailing zeros is only allowed with ::",
"if",
"result",
".",
"endswith",
"(",
"b\":\"",
")",
"and",
"not",
"result",
".",
"endswith",
"(",
"b\"::\"",
")",
":",
"result",
"=",
"result",
"+",
"b\"0\"",
"if",
"result",
".",
"startswith",
"(",
"b\":\"",
")",
"and",
"not",
"result",
".",
"startswith",
"(",
"b\"::\"",
")",
":",
"result",
"=",
"b\"0\"",
"+",
"result",
"return",
"result",
"else",
":",
"raise",
"Exception",
"(",
"\"Address family not supported yet\"",
")"
] | Convert an IP address from binary form into text representation | [
"Convert",
"an",
"IP",
"address",
"from",
"binary",
"form",
"into",
"text",
"represenation"
] | 11d4064844f4f68ac5d7546f5633ac7d02082914 | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/pton_ntop.py#L64-L90 | train | 237,300 |
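A quick sanity check of `inet_ntop` above (assuming Python 3 and the `.encode()` fix noted in the code, since the rest of the routine already operates on byte strings):

```python
import socket

addr = b"\x20\x01\x0d\xb8" + b"\x00" * 12  # 2001:db8:: -- first four bytes set, rest zero
print(inet_ntop(socket.AF_INET6, addr))    # b'2001:db8::'
```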
phaethon/kamene | kamene/crypto/cert.py | strand | def strand(s1, s2):
"""
Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return "".join(map(lambda x,y:chr(ord(x)&ord(y)), s1, s2)) | python | def strand(s1, s2):
"""
Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
must be of same length.
"""
return "".join(map(lambda x,y:chr(ord(x)&ord(y)), s1, s2)) | [
"def",
"strand",
"(",
"s1",
",",
"s2",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
",",
"y",
":",
"chr",
"(",
"ord",
"(",
"x",
")",
"&",
"ord",
"(",
"y",
")",
")",
",",
"s1",
",",
"s2",
")",
")"
] | Returns the binary AND of the 2 provided strings s1 and s2. s1 and s2
must be of same length. | [
"Returns",
"the",
"binary",
"AND",
"of",
"the",
"2",
"provided",
"strings",
"s1",
"and",
"s2",
".",
"s1",
"and",
"s2",
"must",
"be",
"of",
"same",
"length",
"."
] | 11d4064844f4f68ac5d7546f5633ac7d02082914 | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L57-L62 | train | 237,301 |
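Because `ord`/`chr` operate on text in Python 3, `strand` expects `str` inputs, not `bytes`. A small check:

```python
print(strand("abc", "ABC"))  # 'ABC' -- 0x61 & 0x41 == 0x41, and so on per character
```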
phaethon/kamene | kamene/crypto/cert.py | pkcs_mgf1 | def pkcs_mgf1(mgfSeed, maskLen, h):
"""
Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls', 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen
"""
# steps are those of Appendix B.2.1
if not h in _hashFuncParams:
warning("pkcs_mgf1: invalid hash (%s) provided" % h)
return None
hLen = _hashFuncParams[h][0]
hFunc = _hashFuncParams[h][1]
if maskLen > 2**32 * hLen: # 1)
warning("pkcs_mgf1: maskLen > 2**32 * hLen")
return None
T = "" # 2)
maxCounter = math.ceil(float(maskLen) / float(hLen)) # 3)
counter = 0
while counter < maxCounter:
C = pkcs_i2osp(counter, 4)
T += hFunc(mgfSeed + C)
counter += 1
return T[:maskLen] | python | def pkcs_mgf1(mgfSeed, maskLen, h):
"""
Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls', 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen
"""
# steps are those of Appendix B.2.1
if not h in _hashFuncParams:
warning("pkcs_mgf1: invalid hash (%s) provided" % h)
return None
hLen = _hashFuncParams[h][0]
hFunc = _hashFuncParams[h][1]
if maskLen > 2**32 * hLen: # 1)
warning("pkcs_mgf1: maskLen > 2**32 * hLen")
return None
T = "" # 2)
maxCounter = math.ceil(float(maskLen) / float(hLen)) # 3)
counter = 0
while counter < maxCounter:
C = pkcs_i2osp(counter, 4)
T += hFunc(mgfSeed + C)
counter += 1
return T[:maskLen] | [
"def",
"pkcs_mgf1",
"(",
"mgfSeed",
",",
"maskLen",
",",
"h",
")",
":",
"# steps are those of Appendix B.2.1",
"if",
"not",
"h",
"in",
"_hashFuncParams",
":",
"warning",
"(",
"\"pkcs_mgf1: invalid hash (%s) provided\"",
")",
"return",
"None",
"hLen",
"=",
"_hashFuncParams",
"[",
"h",
"]",
"[",
"0",
"]",
"hFunc",
"=",
"_hashFuncParams",
"[",
"h",
"]",
"[",
"1",
"]",
"if",
"maskLen",
">",
"2",
"**",
"32",
"*",
"hLen",
":",
"# 1)",
"warning",
"(",
"\"pkcs_mgf1: maskLen > 2**32 * hLen\"",
")",
"return",
"None",
"T",
"=",
"\"\"",
"# 2)",
"maxCounter",
"=",
"math",
".",
"ceil",
"(",
"float",
"(",
"maskLen",
")",
"/",
"float",
"(",
"hLen",
")",
")",
"# 3)",
"counter",
"=",
"0",
"while",
"counter",
"<",
"maxCounter",
":",
"C",
"=",
"pkcs_i2osp",
"(",
"counter",
",",
"4",
")",
"T",
"+=",
"hFunc",
"(",
"mgfSeed",
"+",
"C",
")",
"counter",
"+=",
"1",
"return",
"T",
"[",
":",
"maskLen",
"]"
] | Implements generic MGF1 Mask Generation function as described in
Appendix B.2.1 of RFC 3447. The hash function is passed by name.
valid values are 'md2', 'md4', 'md5', 'sha1', 'tls', 'sha256',
'sha384' and 'sha512'. Returns None on error.
Input:
mgfSeed: seed from which mask is generated, an octet string
maskLen: intended length in octets of the mask, at most 2^32 * hLen
hLen (see below)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). hLen denotes the length in octets of
the hash function output.
Output:
an octet string of length maskLen | [
"Implements",
"generic",
"MGF1",
"Mask",
"Generation",
"function",
"as",
"described",
"in",
"Appendix",
"B",
".",
"2",
".",
"1",
"of",
"RFC",
"3447",
".",
"The",
"hash",
"function",
"is",
"passed",
"by",
"name",
".",
"valid",
"values",
"are",
"md2",
"md4",
"md5",
"sha1",
"tls",
"sha256",
"sha384",
"and",
"sha512",
".",
"Returns",
"None",
"on",
"error",
"."
] | 11d4064844f4f68ac5d7546f5633ac7d02082914 | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L160-L195 | train | 237,302 |
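`_hashFuncParams` is kamene's internal table keyed by hash name, whose first two entries are the digest length and the hash function. For readers without it, here is a minimal standalone MGF1 over SHA-1 following the same Appendix B.2.1 steps (an illustrative sketch, not kamene's API):

```python
import hashlib
import math
import struct

def mgf1_sha1(mgf_seed: bytes, mask_len: int) -> bytes:
    """Minimal MGF1 (RFC 3447, Appendix B.2.1) using SHA-1, so hLen = 20."""
    h_len = 20
    T = b""
    for counter in range(math.ceil(mask_len / h_len)):
        C = struct.pack(">I", counter)            # I2OSP(counter, 4)
        T += hashlib.sha1(mgf_seed + C).digest()
    return T[:mask_len]

print(mgf1_sha1(b"seed", 8).hex())  # deterministic 8-byte mask
```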
phaethon/kamene | kamene/crypto/cert.py | create_temporary_ca_path | def create_temporary_ca_path(anchor_list, folder):
"""
Create a CA path folder as defined in OpenSSL terminology, by
storing all certificates in 'anchor_list' list in PEM format
under provided 'folder' and then creating the associated links
using the hash as usually done by c_rehash.
Note that you can also include CRL in 'anchor_list'. In that
case, they will also be stored under 'folder' and associated
links will be created.
In folder, the files are created with names of the form
0...ZZ.pem. If you provide an empty list, folder will be created
if it does not already exist, but that's all.
The number of certificates written to folder is returned on
success, None on error.
"""
# We should probably avoid writing duplicate anchors and also
# check if they are all certs.
try:
if not os.path.isdir(folder):
os.makedirs(folder)
except:
return None
l = len(anchor_list)
if l == 0:
return None
fmtstr = "%%0%sd.pem" % math.ceil(math.log(l, 10))
i = 0
try:
for a in anchor_list:
fname = os.path.join(folder, fmtstr % i)
f = open(fname, "w")
s = a.output(fmt="PEM")
f.write(s)
f.close()
i += 1
except:
return None
subprocess.run(["c_rehash", folder])  # os.popen3 was removed in Python 3; needs "import subprocess"
return l | python | def create_temporary_ca_path(anchor_list, folder):
"""
Create a CA path folder as defined in OpenSSL terminology, by
storing all certificates in 'anchor_list' list in PEM format
under provided 'folder' and then creating the associated links
using the hash as usually done by c_rehash.
Note that you can also include CRL in 'anchor_list'. In that
case, they will also be stored under 'folder' and associated
links will be created.
In folder, the files are created with names of the form
0...ZZ.pem. If you provide an empty list, folder will be created
if it does not already exist, but that's all.
The number of certificates written to folder is returned on
success, None on error.
"""
# We should probably avoid writing duplicate anchors and also
# check if they are all certs.
try:
if not os.path.isdir(folder):
os.makedirs(folder)
except:
return None
l = len(anchor_list)
if l == 0:
return None
fmtstr = "%%0%sd.pem" % math.ceil(math.log(l, 10))
i = 0
try:
for a in anchor_list:
fname = os.path.join(folder, fmtstr % i)
f = open(fname, "w")
s = a.output(fmt="PEM")
f.write(s)
f.close()
i += 1
except:
return None
subprocess.run(["c_rehash", folder])  # os.popen3 was removed in Python 3; needs "import subprocess"
return l | [
"def",
"create_temporary_ca_path",
"(",
"anchor_list",
",",
"folder",
")",
":",
"# We should probably avoid writing duplicate anchors and also",
"# check if they are all certs.",
"try",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
":",
"os",
".",
"makedirs",
"(",
"folder",
")",
"except",
":",
"return",
"None",
"l",
"=",
"len",
"(",
"anchor_list",
")",
"if",
"l",
"==",
"0",
":",
"return",
"None",
"fmtstr",
"=",
"\"%%0%sd.pem\"",
"%",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"l",
",",
"10",
")",
")",
"i",
"=",
"0",
"try",
":",
"for",
"a",
"in",
"anchor_list",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"fmtstr",
"%",
"i",
")",
"f",
"=",
"open",
"(",
"fname",
",",
"\"w\"",
")",
"s",
"=",
"a",
".",
"output",
"(",
"fmt",
"=",
"\"PEM\"",
")",
"f",
".",
"write",
"(",
"s",
")",
"f",
".",
"close",
"(",
")",
"i",
"+=",
"1",
"except",
":",
"return",
"None",
"r",
",",
"w",
",",
"e",
"=",
"popen3",
"(",
"[",
"\"c_rehash\"",
",",
"folder",
"]",
")",
"r",
".",
"close",
"(",
")",
"w",
".",
"close",
"(",
")",
"e",
".",
"close",
"(",
")",
"return",
"l"
] | Create a CA path folder as defined in OpenSSL terminology, by
storing all certificates in 'anchor_list' list in PEM format
under provided 'folder' and then creating the associated links
using the hash as usually done by c_rehash.
Note that you can also include CRL in 'anchor_list'. In that
case, they will also be stored under 'folder' and associated
links will be created.
In folder, the files are created with names of the form
0...ZZ.pem. If you provide an empty list, folder will be created
if it does not already exist, but that's all.
The number of certificates written to folder is returned on
success, None on error. | [
"Create",
"a",
"CA",
"path",
"folder",
"as",
"defined",
"in",
"OpenSSL",
"terminology",
"by",
"storing",
"all",
"certificates",
"in",
"anchor_list",
"list",
"in",
"PEM",
"format",
"under",
"provided",
"folder",
"and",
"then",
"creating",
"the",
"associated",
"links",
"using",
"the",
"hash",
"as",
"usually",
"done",
"by",
"c_rehash",
"."
] | 11d4064844f4f68ac5d7546f5633ac7d02082914 | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L382-L427 | train | 237,303 |
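The zero-padded naming scheme built by `"%%0%sd.pem"` can be checked in isolation:

```python
import math

l = 120  # number of anchors to write
fmtstr = "%%0%sd.pem" % math.ceil(math.log(l, 10))  # -> '%03d.pem'
print([fmtstr % i for i in (0, 1, 119)])            # ['000.pem', '001.pem', '119.pem']
```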
phaethon/kamene | kamene/crypto/cert.py | _DecryptAndSignMethods._rsadp | def _rsadp(self, c):
"""
Internal method providing raw RSA decryption, i.e. simple modular
exponentiation of the given ciphertext representative 'c', a long
between 0 and n-1.
This is the decryption primitive RSADP described in PKCS#1 v2.1,
i.e. RFC 3447 Sect. 5.1.2.
Input:
c: ciphertext representative, a long between 0 and n-1, where
n is the key modulus.
Output:
message representative, a long between 0 and n-1
Not intended to be used directly. Please, see encrypt() method.
"""
n = self.modulus
if not isinstance(c, int) or not (0 <= c <= n - 1):  # Python 3: int subsumes the old long
warning("Key._rsadp() expects an integer between 0 and n-1")
return None
return self.key.decrypt(c) | python | def _rsadp(self, c):
"""
Internal method providing raw RSA decryption, i.e. simple modular
exponentiation of the given ciphertext representative 'c', a long
between 0 and n-1.
This is the decryption primitive RSADP described in PKCS#1 v2.1,
i.e. RFC 3447 Sect. 5.1.2.
Input:
c: ciphertext representative, a long between 0 and n-1, where
n is the key modulus.
Output:
message representative, a long between 0 and n-1
Not intended to be used directly. Please, see encrypt() method.
"""
n = self.modulus
if not isinstance(c, int) or not (0 <= c <= n - 1):  # Python 3: int subsumes the old long
warning("Key._rsadp() expects an integer between 0 and n-1")
return None
return self.key.decrypt(c) | [
"def",
"_rsadp",
"(",
"self",
",",
"c",
")",
":",
"n",
"=",
"self",
".",
"modulus",
"if",
"type",
"(",
"c",
")",
"is",
"int",
":",
"c",
"=",
"long",
"(",
"c",
")",
"if",
"type",
"(",
"c",
")",
"is",
"not",
"long",
"or",
"c",
">",
"n",
"-",
"1",
":",
"warning",
"(",
"\"Key._rsaep() expects a long between 0 and n-1\"",
")",
"return",
"None",
"return",
"self",
".",
"key",
".",
"decrypt",
"(",
"c",
")"
] | Internal method providing raw RSA decryption, i.e. simple modular
exponentiation of the given ciphertext representative 'c', a long
between 0 and n-1.
This is the decryption primitive RSADP described in PKCS#1 v2.1,
i.e. RFC 3447 Sect. 5.1.2.
Input:
c: ciphertext representative, a long between 0 and n-1, where
n is the key modulus.
Output:
message representative, a long between 0 and n-1
Not intended to be used directly. Please, see encrypt() method. | [
"Internal",
"method",
"providing",
"raw",
"RSA",
"decryption",
"i",
".",
"e",
".",
"simple",
"modular",
"exponentiation",
"of",
"the",
"given",
"ciphertext",
"representative",
"c",
"a",
"long",
"between",
"0",
"and",
"n",
"-",
"1",
"."
] | 11d4064844f4f68ac5d7546f5633ac7d02082914 | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L576-L602 | train | 237,304 |
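The primitive is plain modular exponentiation; a toy check with the classic textbook parameters (independent of the class above, whose `self.key` wraps a real key object):

```python
n, e, d = 3233, 17, 2753    # toy RSA: p=61, q=53
m = 65                      # message representative
c = pow(m, e, n)            # RSAEP: c = m^e mod n -> 2790
assert pow(c, d, n) == m    # RSADP: m = c^d mod n
```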
phaethon/kamene | kamene/layers/inet.py | fragment | def fragment(pkt, fragsize=1480):
"""Fragment a big IP datagram"""
fragsize = (fragsize + 7) // 8 * 8
lst = []
for p in pkt:
s = bytes(p[IP].payload)
nb = (len(s) + fragsize - 1) // fragsize
for i in range(nb):
q = p.copy()
del q[IP].payload
del q[IP].chksum
del q[IP].len
if i == nb - 1:
q[IP].flags &= ~1
else:
q[IP].flags |= 1
q[IP].frag = i * fragsize // 8
r = conf.raw_layer(load=s[i * fragsize:(i + 1) * fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst | python | def fragment(pkt, fragsize=1480):
"""Fragment a big IP datagram"""
fragsize = (fragsize + 7) // 8 * 8
lst = []
for p in pkt:
s = bytes(p[IP].payload)
nb = (len(s) + fragsize - 1) // fragsize
for i in range(nb):
q = p.copy()
del q[IP].payload
del q[IP].chksum
del q[IP].len
if i == nb - 1:
q[IP].flags &= ~1
else:
q[IP].flags |= 1
q[IP].frag = i * fragsize // 8
r = conf.raw_layer(load=s[i * fragsize:(i + 1) * fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst | [
"def",
"fragment",
"(",
"pkt",
",",
"fragsize",
"=",
"1480",
")",
":",
"fragsize",
"=",
"(",
"fragsize",
"+",
"7",
")",
"//",
"8",
"*",
"8",
"lst",
"=",
"[",
"]",
"for",
"p",
"in",
"pkt",
":",
"s",
"=",
"bytes",
"(",
"p",
"[",
"IP",
"]",
".",
"payload",
")",
"nb",
"=",
"(",
"len",
"(",
"s",
")",
"+",
"fragsize",
"-",
"1",
")",
"//",
"fragsize",
"for",
"i",
"in",
"range",
"(",
"nb",
")",
":",
"q",
"=",
"p",
".",
"copy",
"(",
")",
"del",
"q",
"[",
"IP",
"]",
".",
"payload",
"del",
"q",
"[",
"IP",
"]",
".",
"chksum",
"del",
"q",
"[",
"IP",
"]",
".",
"len",
"if",
"i",
"==",
"nb",
"-",
"1",
":",
"q",
"[",
"IP",
"]",
".",
"flags",
"&=",
"~",
"1",
"else",
":",
"q",
"[",
"IP",
"]",
".",
"flags",
"|=",
"1",
"q",
"[",
"IP",
"]",
".",
"frag",
"=",
"i",
"*",
"fragsize",
"//",
"8",
"r",
"=",
"conf",
".",
"raw_layer",
"(",
"load",
"=",
"s",
"[",
"i",
"*",
"fragsize",
":",
"(",
"i",
"+",
"1",
")",
"*",
"fragsize",
"]",
")",
"r",
".",
"overload_fields",
"=",
"p",
"[",
"IP",
"]",
".",
"payload",
".",
"overload_fields",
".",
"copy",
"(",
")",
"q",
".",
"add_payload",
"(",
"r",
")",
"lst",
".",
"append",
"(",
"q",
")",
"return",
"lst"
] | Fragment a big IP datagram | [
"Fragment",
"a",
"big",
"IP",
"datagram"
] | 11d4064844f4f68ac5d7546f5633ac7d02082914 | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/layers/inet.py#L864-L885 | train | 237,305 |
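The rounding and offset bookkeeping in `fragment` can be verified without kamene; the IP `frag` field counts 8-byte blocks:

```python
fragsize = 1480
fragsize = (fragsize + 7) // 8 * 8               # round up to a multiple of 8
payload_len = 3000
nb = (payload_len + fragsize - 1) // fragsize    # ceiling division -> 3 fragments
print([i * fragsize // 8 for i in range(nb)])    # [0, 185, 370]
```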
twitterdev/search-tweets-python | setup.py | parse_version | def parse_version(str_):
"""
Parses the program's version from a python variable declaration.
"""
v = re.findall(r"\d+\.\d+\.\d+", str_)  # escape the dots so only literal periods match
if v:
return v[0]
else:
print("cannot parse string {}".format(str_))
raise KeyError | python | def parse_version(str_):
"""
Parses the program's version from a python variable declaration.
"""
v = re.findall(r"\d+\.\d+\.\d+", str_)  # escape the dots so only literal periods match
if v:
return v[0]
else:
print("cannot parse string {}".format(str_))
raise KeyError | [
"def",
"parse_version",
"(",
"str_",
")",
":",
"v",
"=",
"re",
".",
"findall",
"(",
"r\"\\d+.\\d+.\\d+\"",
",",
"str_",
")",
"if",
"v",
":",
"return",
"v",
"[",
"0",
"]",
"else",
":",
"print",
"(",
"\"cannot parse string {}\"",
".",
"format",
"(",
"str_",
")",
")",
"raise",
"KeyError"
] | Parses the program's version from a python variable declaration. | [
"Parses",
"the",
"program",
"s",
"version",
"from",
"a",
"python",
"variable",
"declaration",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/setup.py#L8-L17 | train | 237,306 |
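With the dots escaped as noted above, the regex pulls a semantic version string out of a declaration:

```python
import re

print(re.findall(r"\d+\.\d+\.\d+", "VERSION = '1.7.6'"))  # ['1.7.6']
```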
twitterdev/search-tweets-python | searchtweets/result_stream.py | make_session | def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None):
"""Creates a Requests Session for use. Accepts a bearer token
for premium users and will override username and password information if
present.
Args:
username (str): username for the session
password (str): password for the user
bearer_token (str): token for a premium API user.
extra_headers_dict (dict): extra headers to merge into the session headers.
"""
if password is None and bearer_token is None:
logger.error("No authentication information provided; "
"please check your object")
raise KeyError
session = requests.Session()
session.trust_env = False
headers = {'Accept-encoding': 'gzip',
'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}
if bearer_token:
logger.info("using bearer token for authentication")
headers['Authorization'] = "Bearer {}".format(bearer_token)
session.headers = headers
else:
logger.info("using username and password for authentication")
session.auth = username, password
session.headers = headers
if extra_headers_dict:
headers.update(extra_headers_dict)
return session | python | def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None):
"""Creates a Requests Session for use. Accepts a bearer token
for premium users and will override username and password information if
present.
Args:
username (str): username for the session
password (str): password for the user
bearer_token (str): token for a premium API user.
extra_headers_dict (dict): extra headers to merge into the session headers.
"""
if password is None and bearer_token is None:
logger.error("No authentication information provided; "
"please check your object")
raise KeyError
session = requests.Session()
session.trust_env = False
headers = {'Accept-encoding': 'gzip',
'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}
if bearer_token:
logger.info("using bearer token for authentication")
headers['Authorization'] = "Bearer {}".format(bearer_token)
session.headers = headers
else:
logger.info("using username and password for authentication")
session.auth = username, password
session.headers = headers
if extra_headers_dict:
headers.update(extra_headers_dict)
return session | [
"def",
"make_session",
"(",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"bearer_token",
"=",
"None",
",",
"extra_headers_dict",
"=",
"None",
")",
":",
"if",
"password",
"is",
"None",
"and",
"bearer_token",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"No authentication information provided; \"",
"\"please check your object\"",
")",
"raise",
"KeyError",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"trust_env",
"=",
"False",
"headers",
"=",
"{",
"'Accept-encoding'",
":",
"'gzip'",
",",
"'User-Agent'",
":",
"'twitterdev-search-tweets-python/'",
"+",
"VERSION",
"}",
"if",
"bearer_token",
":",
"logger",
".",
"info",
"(",
"\"using bearer token for authentication\"",
")",
"headers",
"[",
"'Authorization'",
"]",
"=",
"\"Bearer {}\"",
".",
"format",
"(",
"bearer_token",
")",
"session",
".",
"headers",
"=",
"headers",
"else",
":",
"logger",
".",
"info",
"(",
"\"using username and password for authentication\"",
")",
"session",
".",
"auth",
"=",
"username",
",",
"password",
"session",
".",
"headers",
"=",
"headers",
"if",
"extra_headers_dict",
":",
"headers",
".",
"update",
"(",
"extra_headers_dict",
")",
"return",
"session"
] | Creates a Requests Session for use. Accepts a bearer token
for premium users and will override username and password information if
present.
Args:
username (str): username for the session
password (str): password for the user
bearer_token (str): token for a premium API user.
extra_headers_dict (dict): extra headers to merge into the session headers. | [
"Creates",
"a",
"Requests",
"Session",
"for",
"use",
".",
"Accepts",
"a",
"bearer",
"token",
"for",
"premiums",
"users",
"and",
"will",
"override",
"username",
"and",
"password",
"information",
"if",
"present",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L31-L61 | train | 237,307 |
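A usage sketch for `make_session` (no request is sent; the token is a placeholder):

```python
session = make_session(bearer_token="<YOUR_BEARER_TOKEN>")
print(session.headers["Authorization"])  # Bearer <YOUR_BEARER_TOKEN>
session.close()
```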
twitterdev/search-tweets-python | searchtweets/result_stream.py | retry | def retry(func):
"""
Decorator to handle API retries and exceptions. Defaults to three retries.
Args:
func (function): function for decoration
Returns:
decorated function
"""
def retried_func(*args, **kwargs):
max_tries = 3
tries = 0
while True:
try:
resp = func(*args, **kwargs)
except requests.exceptions.ConnectionError as exc:
exc.msg = "Connection error for session; exiting"
raise exc
except requests.exceptions.HTTPError as exc:
exc.msg = "HTTP error for session; exiting"
raise exc
if resp.status_code != 200 and tries < max_tries:
logger.warning("retrying request; current status code: {}"
.format(resp.status_code))
tries += 1
# mini exponential backoff here.
time.sleep(tries ** 2)
continue
break
if resp.status_code != 200:
error_message = resp.json()["error"]["message"]
logger.error("HTTP Error code: {}: {}".format(resp.status_code, error_message))
logger.error("Rule payload: {}".format(kwargs.get("rule_payload")))  # .get(): it may be passed positionally
raise requests.exceptions.HTTPError
return resp
return retried_func | python | def retry(func):
"""
Decorator to handle API retries and exceptions. Defaults to three retries.
Args:
func (function): function for decoration
Returns:
decorated function
"""
def retried_func(*args, **kwargs):
max_tries = 3
tries = 0
while True:
try:
resp = func(*args, **kwargs)
except requests.exceptions.ConnectionError as exc:
exc.msg = "Connection error for session; exiting"
raise exc
except requests.exceptions.HTTPError as exc:
exc.msg = "HTTP error for session; exiting"
raise exc
if resp.status_code != 200 and tries < max_tries:
logger.warning("retrying request; current status code: {}"
.format(resp.status_code))
tries += 1
# mini exponential backoff here.
time.sleep(tries ** 2)
continue
break
if resp.status_code != 200:
error_message = resp.json()["error"]["message"]
logger.error("HTTP Error code: {}: {}".format(resp.status_code, error_message))
logger.error("Rule payload: {}".format(kwargs.get("rule_payload")))  # .get(): it may be passed positionally
raise requests.exceptions.HTTPError
return resp
return retried_func | [
"def",
"retry",
"(",
"func",
")",
":",
"def",
"retried_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"max_tries",
"=",
"3",
"tries",
"=",
"0",
"while",
"True",
":",
"try",
":",
"resp",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"exc",
":",
"exc",
".",
"msg",
"=",
"\"Connection error for session; exiting\"",
"raise",
"exc",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"exc",
":",
"exc",
".",
"msg",
"=",
"\"HTTP error for session; exiting\"",
"raise",
"exc",
"if",
"resp",
".",
"status_code",
"!=",
"200",
"and",
"tries",
"<",
"max_tries",
":",
"logger",
".",
"warning",
"(",
"\"retrying request; current status code: {}\"",
".",
"format",
"(",
"resp",
".",
"status_code",
")",
")",
"tries",
"+=",
"1",
"# mini exponential backoff here.",
"time",
".",
"sleep",
"(",
"tries",
"**",
"2",
")",
"continue",
"break",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"error_message",
"=",
"resp",
".",
"json",
"(",
")",
"[",
"\"error\"",
"]",
"[",
"\"message\"",
"]",
"logger",
".",
"error",
"(",
"\"HTTP Error code: {}: {}\"",
".",
"format",
"(",
"resp",
".",
"status_code",
",",
"error_message",
")",
")",
"logger",
".",
"error",
"(",
"\"Rule payload: {}\"",
".",
"format",
"(",
"kwargs",
"[",
"\"rule_payload\"",
"]",
")",
")",
"raise",
"requests",
".",
"exceptions",
".",
"HTTPError",
"return",
"resp",
"return",
"retried_func"
] | Decorator to handle API retries and exceptions. Defaults to three retries.
Args:
func (function): function for decoration
Returns:
decorated function | [
"Decorator",
"to",
"handle",
"API",
"retries",
"and",
"exceptions",
".",
"Defaults",
"to",
"three",
"retries",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L64-L108 | train | 237,308 |
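With `max_tries = 3`, the `tries ** 2` backoff above sleeps 1, 4, and 9 seconds before the loop gives up:

```python
max_tries = 3
print([t ** 2 for t in range(1, max_tries + 1)])  # [1, 4, 9] seconds between attempts
```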
twitterdev/search-tweets-python | searchtweets/result_stream.py | request | def request(session, url, rule_payload, **kwargs):
"""
Executes a request with the given payload and arguments.
Args:
session (requests.Session): the valid session object
url (str): Valid API endpoint
rule_payload (str or dict): rule package for the POST. If you pass a
dictionary, it will be converted into JSON.
"""
if isinstance(rule_payload, dict):
rule_payload = json.dumps(rule_payload)
logger.debug("sending request")
result = session.post(url, data=rule_payload, **kwargs)
return result | python | def request(session, url, rule_payload, **kwargs):
"""
Executes a request with the given payload and arguments.
Args:
session (requests.Session): the valid session object
url (str): Valid API endpoint
rule_payload (str or dict): rule package for the POST. If you pass a
dictionary, it will be converted into JSON.
"""
if isinstance(rule_payload, dict):
rule_payload = json.dumps(rule_payload)
logger.debug("sending request")
result = session.post(url, data=rule_payload, **kwargs)
return result | [
"def",
"request",
"(",
"session",
",",
"url",
",",
"rule_payload",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"rule_payload",
",",
"dict",
")",
":",
"rule_payload",
"=",
"json",
".",
"dumps",
"(",
"rule_payload",
")",
"logger",
".",
"debug",
"(",
"\"sending request\"",
")",
"result",
"=",
"session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"rule_payload",
",",
"*",
"*",
"kwargs",
")",
"return",
"result"
] | Executes a request with the given payload and arguments.
Args:
session (requests.Session): the valid session object
url (str): Valid API endpoint
rule_payload (str or dict): rule package for the POST. If you pass a
dictionary, it will be converted into JSON. | [
"Executes",
"a",
"request",
"with",
"the",
"given",
"payload",
"and",
"arguments",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L112-L126 | train | 237,309 |
twitterdev/search-tweets-python | searchtweets/result_stream.py | collect_results | def collect_results(rule, max_results=500, result_stream_args=None):
"""
Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
...                          max_results=500,
...                          result_stream_args=search_args)
"""
if result_stream_args is None:
logger.error("This function requires a configuration dict for the "
"inner ResultStream object.")
raise KeyError
rs = ResultStream(rule_payload=rule,
max_results=max_results,
**result_stream_args)
return list(rs.stream()) | python | def collect_results(rule, max_results=500, result_stream_args=None):
"""
Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
...                          max_results=500,
...                          result_stream_args=search_args)
"""
if result_stream_args is None:
logger.error("This function requires a configuration dict for the "
"inner ResultStream object.")
raise KeyError
rs = ResultStream(rule_payload=rule,
max_results=max_results,
**result_stream_args)
return list(rs.stream()) | [
"def",
"collect_results",
"(",
"rule",
",",
"max_results",
"=",
"500",
",",
"result_stream_args",
"=",
"None",
")",
":",
"if",
"result_stream_args",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"This function requires a configuration dict for the \"",
"\"inner ResultStream object.\"",
")",
"raise",
"KeyError",
"rs",
"=",
"ResultStream",
"(",
"rule_payload",
"=",
"rule",
",",
"max_results",
"=",
"max_results",
",",
"*",
"*",
"result_stream_args",
")",
"return",
"list",
"(",
"rs",
".",
"stream",
"(",
")",
")"
] | Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args) | [
"Utility",
"function",
"to",
"quickly",
"get",
"a",
"list",
"of",
"tweets",
"from",
"a",
"ResultStream",
"without",
"keeping",
"the",
"object",
"around",
".",
"Requires",
"your",
"args",
"to",
"be",
"configured",
"prior",
"to",
"using",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L276-L308 | train | 237,310 |
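For the docstring's example, `search_args` is a configuration dict consumed by the inner `ResultStream`; all values below are placeholders and the keys shown are only the minimum for bearer-token auth:

```python
search_args = {
    "bearer_token": "<YOUR_BEARER_TOKEN>",
    "endpoint": "<YOUR_PREMIUM_SEARCH_ENDPOINT_URL>",
}
```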
twitterdev/search-tweets-python | searchtweets/result_stream.py | ResultStream.stream | def stream(self):
"""
Main entry point for the data from the API. Will automatically paginate
through the results via the ``next`` token and return up to ``max_results``
tweets or up to ``max_requests`` API calls, whichever is lower.
Usage:
>>> result_stream = ResultStream(**kwargs)
>>> stream = result_stream.stream()
>>> results = list(stream)
>>> # or for faster usage...
>>> results = list(ResultStream(**kwargs).stream())
"""
self.init_session()
self.check_counts()
self.execute_request()
self.stream_started = True
while True:
for tweet in self.current_tweets:
if self.total_results >= self.max_results:
break
yield self._tweet_func(tweet)
self.total_results += 1
if self.next_token and self.total_results < self.max_results and self.n_requests <= self.max_requests:
self.rule_payload = merge_dicts(self.rule_payload,
{"next": self.next_token})
logger.info("paging; total requests read so far: {}"
.format(self.n_requests))
self.execute_request()
else:
break
logger.info("ending stream at {} tweets".format(self.total_results))
self.current_tweets = None
self.session.close() | python | def stream(self):
"""
Main entry point for the data from the API. Will automatically paginate
through the results via the ``next`` token and return up to ``max_results``
tweets or up to ``max_requests`` API calls, whichever is lower.
Usage:
>>> result_stream = ResultStream(**kwargs)
>>> stream = result_stream.stream()
>>> results = list(stream)
>>> # or for faster usage...
>>> results = list(ResultStream(**kwargs).stream())
"""
self.init_session()
self.check_counts()
self.execute_request()
self.stream_started = True
while True:
for tweet in self.current_tweets:
if self.total_results >= self.max_results:
break
yield self._tweet_func(tweet)
self.total_results += 1
if self.next_token and self.total_results < self.max_results and self.n_requests <= self.max_requests:
self.rule_payload = merge_dicts(self.rule_payload,
{"next": self.next_token})
logger.info("paging; total requests read so far: {}"
.format(self.n_requests))
self.execute_request()
else:
break
logger.info("ending stream at {} tweets".format(self.total_results))
self.current_tweets = None
self.session.close() | [
"def",
"stream",
"(",
"self",
")",
":",
"self",
".",
"init_session",
"(",
")",
"self",
".",
"check_counts",
"(",
")",
"self",
".",
"execute_request",
"(",
")",
"self",
".",
"stream_started",
"=",
"True",
"while",
"True",
":",
"for",
"tweet",
"in",
"self",
".",
"current_tweets",
":",
"if",
"self",
".",
"total_results",
">=",
"self",
".",
"max_results",
":",
"break",
"yield",
"self",
".",
"_tweet_func",
"(",
"tweet",
")",
"self",
".",
"total_results",
"+=",
"1",
"if",
"self",
".",
"next_token",
"and",
"self",
".",
"total_results",
"<",
"self",
".",
"max_results",
"and",
"self",
".",
"n_requests",
"<=",
"self",
".",
"max_requests",
":",
"self",
".",
"rule_payload",
"=",
"merge_dicts",
"(",
"self",
".",
"rule_payload",
",",
"{",
"\"next\"",
":",
"self",
".",
"next_token",
"}",
")",
"logger",
".",
"info",
"(",
"\"paging; total requests read so far: {}\"",
".",
"format",
"(",
"self",
".",
"n_requests",
")",
")",
"self",
".",
"execute_request",
"(",
")",
"else",
":",
"break",
"logger",
".",
"info",
"(",
"\"ending stream at {} tweets\"",
".",
"format",
"(",
"self",
".",
"total_results",
")",
")",
"self",
".",
"current_tweets",
"=",
"None",
"self",
".",
"session",
".",
"close",
"(",
")"
] | Main entry point for the data from the API. Will automatically paginate
through the results via the ``next`` token and return up to ``max_results``
tweets or up to ``max_requests`` API calls, whichever is lower.
Usage:
>>> result_stream = ResultStream(**kwargs)
>>> stream = result_stream.stream()
>>> results = list(stream)
>>> # or for faster usage...
>>> results = list(ResultStream(**kwargs).stream()) | [
"Main",
"entry",
"point",
"for",
"the",
"data",
"from",
"the",
"API",
".",
"Will",
"automatically",
"paginate",
"through",
"the",
"results",
"via",
"the",
"next",
"token",
"and",
"return",
"up",
"to",
"max_results",
"tweets",
"or",
"up",
"to",
"max_requests",
"API",
"calls",
"whichever",
"is",
"lower",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L193-L227 | train | 237,311 |
twitterdev/search-tweets-python | searchtweets/result_stream.py | ResultStream.init_session | def init_session(self):
"""
Defines a session object for passing requests.
"""
if self.session:
self.session.close()
self.session = make_session(self.username,
self.password,
self.bearer_token,
self.extra_headers_dict) | python | def init_session(self):
"""
Defines a session object for passing requests.
"""
if self.session:
self.session.close()
self.session = make_session(self.username,
self.password,
self.bearer_token,
self.extra_headers_dict) | [
"def",
"init_session",
"(",
"self",
")",
":",
"if",
"self",
".",
"session",
":",
"self",
".",
"session",
".",
"close",
"(",
")",
"self",
".",
"session",
"=",
"make_session",
"(",
"self",
".",
"username",
",",
"self",
".",
"password",
",",
"self",
".",
"bearer_token",
",",
"self",
".",
"extra_headers_dict",
")"
] | Defines a session object for passing requests. | [
"Defines",
"a",
"session",
"object",
"for",
"passing",
"requests",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L229-L238 | train | 237,312 |
twitterdev/search-tweets-python | searchtweets/result_stream.py | ResultStream.check_counts | def check_counts(self):
"""
Disables tweet parsing if the count API is used.
"""
if "counts" in re.split("[/.]", self.endpoint):
logger.info("disabling tweet parsing due to counts API usage")
self._tweet_func = lambda x: x | python | def check_counts(self):
"""
Disables tweet parsing if the count API is used.
"""
if "counts" in re.split("[/.]", self.endpoint):
logger.info("disabling tweet parsing due to counts API usage")
self._tweet_func = lambda x: x | [
"def",
"check_counts",
"(",
"self",
")",
":",
"if",
"\"counts\"",
"in",
"re",
".",
"split",
"(",
"\"[/.]\"",
",",
"self",
".",
"endpoint",
")",
":",
"logger",
".",
"info",
"(",
"\"disabling tweet parsing due to counts API usage\"",
")",
"self",
".",
"_tweet_func",
"=",
"lambda",
"x",
":",
"x"
] | Disables tweet parsing if the count API is used. | [
"Disables",
"tweet",
"parsing",
"if",
"the",
"count",
"API",
"is",
"used",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L240-L246 | train | 237,313 |
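The detection is a plain token split on `/` and `.`; with a hypothetical counts endpoint:

```python
import re

endpoint = "https://example.invalid/tweets/search/30day/dev/counts.json"
print("counts" in re.split("[/.]", endpoint))  # True -> tweet parsing is disabled
```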
twitterdev/search-tweets-python | searchtweets/result_stream.py | ResultStream.execute_request | def execute_request(self):
"""
Sends the request to the API and parses the json response.
Makes some assumptions about the session length and sets the presence
of a "next" token.
"""
if self.n_requests % 20 == 0 and self.n_requests > 1:
logger.info("refreshing session")
self.init_session()
resp = request(session=self.session,
url=self.endpoint,
rule_payload=self.rule_payload)
self.n_requests += 1
ResultStream.session_request_counter += 1
resp = json.loads(resp.content.decode(resp.encoding or "utf-8"))  # encoding may be None
self.next_token = resp.get("next", None)
self.current_tweets = resp["results"] | python | def execute_request(self):
"""
Sends the request to the API and parses the json response.
Makes some assumptions about the session length and sets the presence
of a "next" token.
"""
if self.n_requests % 20 == 0 and self.n_requests > 1:
logger.info("refreshing session")
self.init_session()
resp = request(session=self.session,
url=self.endpoint,
rule_payload=self.rule_payload)
self.n_requests += 1
ResultStream.session_request_counter += 1
resp = json.loads(resp.content.decode(resp.encoding or "utf-8"))  # encoding may be None
self.next_token = resp.get("next", None)
self.current_tweets = resp["results"] | [
"def",
"execute_request",
"(",
"self",
")",
":",
"if",
"self",
".",
"n_requests",
"%",
"20",
"==",
"0",
"and",
"self",
".",
"n_requests",
">",
"1",
":",
"logger",
".",
"info",
"(",
"\"refreshing session\"",
")",
"self",
".",
"init_session",
"(",
")",
"resp",
"=",
"request",
"(",
"session",
"=",
"self",
".",
"session",
",",
"url",
"=",
"self",
".",
"endpoint",
",",
"rule_payload",
"=",
"self",
".",
"rule_payload",
")",
"self",
".",
"n_requests",
"+=",
"1",
"ResultStream",
".",
"session_request_counter",
"+=",
"1",
"resp",
"=",
"json",
".",
"loads",
"(",
"resp",
".",
"content",
".",
"decode",
"(",
"resp",
".",
"encoding",
")",
")",
"self",
".",
"next_token",
"=",
"resp",
".",
"get",
"(",
"\"next\"",
",",
"None",
")",
"self",
".",
"current_tweets",
"=",
"resp",
"[",
"\"results\"",
"]"
] | Sends the request to the API and parses the json response.
Makes some assumptions about the session length and sets the presence
of a "next" token. | [
"Sends",
"the",
"request",
"to",
"the",
"API",
"and",
"parses",
"the",
"json",
"response",
".",
"Makes",
"some",
"assumptions",
"about",
"the",
"session",
"length",
"and",
"sets",
"the",
"presence",
"of",
"a",
"next",
"token",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L248-L265 | train | 237,314 |
twitterdev/search-tweets-python | searchtweets/api_utils.py | gen_rule_payload | def gen_rule_payload(pt_rule, results_per_call=None,
from_date=None, to_date=None, count_bucket=None,
tag=None,
stringify=True):
"""
Generates the dict or json payload for a PowerTrack rule.
Args:
pt_rule (str): The string version of a powertrack rule,
e.g., "beyonce has:geo". Accepts multi-line strings
for ease of entry.
results_per_call (int): number of tweets or counts returned per API
call. This maps to the ``maxResults`` search API parameter.
If None (the default), the parameter is omitted and the API default applies.
from_date (str or None): Date format as specified by
`convert_utc_time` for the starting time of your search.
to_date (str or None): date format as specified by `convert_utc_time`
for the end time of your search.
count_bucket (str or None): If using the counts api endpoint,
will define the count bucket for which tweets are aggregated.
stringify (bool): specifies the return type, `dict`
or json-formatted `str`.
Example:
>>> from searchtweets.utils import gen_rule_payload
>>> gen_rule_payload("beyonce has:geo",
... from_date="2017-08-21",
... to_date="2017-08-22")
'{"query": "beyonce has:geo", "toDate": "201708220000", "fromDate": "201708210000"}'
"""
pt_rule = ' '.join(pt_rule.split()) # allows multi-line strings
payload = {"query": pt_rule}
if isinstance(results_per_call, int):  # None is never an int, so no separate None check is needed
payload["maxResults"] = results_per_call
if to_date:
payload["toDate"] = convert_utc_time(to_date)
if from_date:
payload["fromDate"] = convert_utc_time(from_date)
if count_bucket:
if set(["day", "hour", "minute"]) & set([count_bucket]):
payload["bucket"] = count_bucket
payload.pop("maxResults", None)  # may be absent when results_per_call is None
else:
logger.error("invalid count bucket: provided {}"
.format(count_bucket))
raise ValueError
if tag:
payload["tag"] = tag
return json.dumps(payload) if stringify else payload | python | def gen_rule_payload(pt_rule, results_per_call=None,
from_date=None, to_date=None, count_bucket=None,
tag=None,
stringify=True):
"""
Generates the dict or json payload for a PowerTrack rule.
Args:
pt_rule (str): The string version of a powertrack rule,
e.g., "beyonce has:geo". Accepts multi-line strings
for ease of entry.
results_per_call (int): number of tweets or counts returned per API
call. This maps to the ``maxResults`` search API parameter.
If None (the default), the parameter is omitted and the API default applies.
from_date (str or None): Date format as specified by
`convert_utc_time` for the starting time of your search.
to_date (str or None): date format as specified by `convert_utc_time`
for the end time of your search.
count_bucket (str or None): If using the counts api endpoint,
will define the count bucket for which tweets are aggregated.
stringify (bool): specifies the return type, `dict`
or json-formatted `str`.
Example:
>>> from searchtweets.utils import gen_rule_payload
>>> gen_rule_payload("beyonce has:geo",
... from_date="2017-08-21",
... to_date="2017-08-22")
'{"query": "beyonce has:geo", "toDate": "201708220000", "fromDate": "201708210000"}'
"""
pt_rule = ' '.join(pt_rule.split()) # allows multi-line strings
payload = {"query": pt_rule}
if isinstance(results_per_call, int):  # None is never an int, so no separate None check is needed
payload["maxResults"] = results_per_call
if to_date:
payload["toDate"] = convert_utc_time(to_date)
if from_date:
payload["fromDate"] = convert_utc_time(from_date)
if count_bucket:
if set(["day", "hour", "minute"]) & set([count_bucket]):
payload["bucket"] = count_bucket
payload.pop("maxResults", None)  # may be absent when results_per_call is None
else:
logger.error("invalid count bucket: provided {}"
.format(count_bucket))
raise ValueError
if tag:
payload["tag"] = tag
return json.dumps(payload) if stringify else payload | [
"def",
"gen_rule_payload",
"(",
"pt_rule",
",",
"results_per_call",
"=",
"None",
",",
"from_date",
"=",
"None",
",",
"to_date",
"=",
"None",
",",
"count_bucket",
"=",
"None",
",",
"tag",
"=",
"None",
",",
"stringify",
"=",
"True",
")",
":",
"pt_rule",
"=",
"' '",
".",
"join",
"(",
"pt_rule",
".",
"split",
"(",
")",
")",
"# allows multi-line strings",
"payload",
"=",
"{",
"\"query\"",
":",
"pt_rule",
"}",
"if",
"results_per_call",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"results_per_call",
",",
"int",
")",
"is",
"True",
":",
"payload",
"[",
"\"maxResults\"",
"]",
"=",
"results_per_call",
"if",
"to_date",
":",
"payload",
"[",
"\"toDate\"",
"]",
"=",
"convert_utc_time",
"(",
"to_date",
")",
"if",
"from_date",
":",
"payload",
"[",
"\"fromDate\"",
"]",
"=",
"convert_utc_time",
"(",
"from_date",
")",
"if",
"count_bucket",
":",
"if",
"set",
"(",
"[",
"\"day\"",
",",
"\"hour\"",
",",
"\"minute\"",
"]",
")",
"&",
"set",
"(",
"[",
"count_bucket",
"]",
")",
":",
"payload",
"[",
"\"bucket\"",
"]",
"=",
"count_bucket",
"del",
"payload",
"[",
"\"maxResults\"",
"]",
"else",
":",
"logger",
".",
"error",
"(",
"\"invalid count bucket: provided {}\"",
".",
"format",
"(",
"count_bucket",
")",
")",
"raise",
"ValueError",
"if",
"tag",
":",
"payload",
"[",
"\"tag\"",
"]",
"=",
"tag",
"return",
"json",
".",
"dumps",
"(",
"payload",
")",
"if",
"stringify",
"else",
"payload"
] | Generates the dict or json payload for a PowerTrack rule.
Args:
pt_rule (str): The string version of a powertrack rule,
e.g., "beyonce has:geo". Accepts multi-line strings
for ease of entry.
results_per_call (int): number of tweets or counts returned per API
call. This maps to the ``maxResults`` search API parameter.
If None (the default), the parameter is omitted and the API default applies.
from_date (str or None): Date format as specified by
`convert_utc_time` for the starting time of your search.
to_date (str or None): date format as specified by `convert_utc_time`
for the end time of your search.
count_bucket (str or None): If using the counts api endpoint,
will define the count bucket for which tweets are aggregated.
stringify (bool): specifies the return type, `dict`
or json-formatted `str`.
Example:
>>> from searchtweets.utils import gen_rule_payload
>>> gen_rule_payload("beyonce has:geo",
... from_date="2017-08-21",
... to_date="2017-08-22")
'{"query": "beyonce has:geo", "toDate": "201708220000", "fromDate": "201708210000"}' | [
"Generates",
"the",
"dict",
"or",
"json",
"payload",
"for",
"a",
"PowerTrack",
"rule",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/api_utils.py#L86-L138 | train | 237,315 |
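With the `pop()` fix noted above, a counts payload no longer assumes `maxResults` is present:

```python
print(gen_rule_payload("beyonce has:geo", count_bucket="day", stringify=False))
# {'query': 'beyonce has:geo', 'bucket': 'day'}
```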
twitterdev/search-tweets-python | searchtweets/api_utils.py | gen_params_from_config | def gen_params_from_config(config_dict):
"""
Generates parameters for a ResultStream from a dictionary.
"""
if config_dict.get("count_bucket"):
logger.warning("change your endpoint to the count endpoint; this is "
"default behavior when the count bucket "
"field is defined")
endpoint = change_to_count_endpoint(config_dict.get("endpoint"))
else:
endpoint = config_dict.get("endpoint")
def intify(arg):
if not isinstance(arg, int) and arg is not None:
return int(arg)
else:
return arg
# this parameter comes in as a string when it's parsed
results_per_call = intify(config_dict.get("results_per_call", None))
rule = gen_rule_payload(pt_rule=config_dict["pt_rule"],
from_date=config_dict.get("from_date", None),
to_date=config_dict.get("to_date", None),
results_per_call=results_per_call,
count_bucket=config_dict.get("count_bucket", None))
_dict = {"endpoint": endpoint,
"username": config_dict.get("username"),
"password": config_dict.get("password"),
"bearer_token": config_dict.get("bearer_token"),
"extra_headers_dict": config_dict.get("extra_headers_dict",None),
"rule_payload": rule,
"results_per_file": intify(config_dict.get("results_per_file")),
"max_results": intify(config_dict.get("max_results")),
"max_pages": intify(config_dict.get("max_pages", None))}
return _dict | python | def gen_params_from_config(config_dict):
"""
Generates parameters for a ResultStream from a dictionary.
"""
if config_dict.get("count_bucket"):
logger.warning("change your endpoint to the count endpoint; this is "
"default behavior when the count bucket "
"field is defined")
endpoint = change_to_count_endpoint(config_dict.get("endpoint"))
else:
endpoint = config_dict.get("endpoint")
def intify(arg):
if not isinstance(arg, int) and arg is not None:
return int(arg)
else:
return arg
# this parameter comes in as a string when it's parsed
results_per_call = intify(config_dict.get("results_per_call", None))
rule = gen_rule_payload(pt_rule=config_dict["pt_rule"],
from_date=config_dict.get("from_date", None),
to_date=config_dict.get("to_date", None),
results_per_call=results_per_call,
count_bucket=config_dict.get("count_bucket", None))
_dict = {"endpoint": endpoint,
"username": config_dict.get("username"),
"password": config_dict.get("password"),
"bearer_token": config_dict.get("bearer_token"),
"extra_headers_dict": config_dict.get("extra_headers_dict",None),
"rule_payload": rule,
"results_per_file": intify(config_dict.get("results_per_file")),
"max_results": intify(config_dict.get("max_results")),
"max_pages": intify(config_dict.get("max_pages", None))}
return _dict | [
"def",
"gen_params_from_config",
"(",
"config_dict",
")",
":",
"if",
"config_dict",
".",
"get",
"(",
"\"count_bucket\"",
")",
":",
"logger",
".",
"warning",
"(",
"\"change your endpoint to the count endpoint; this is \"",
"\"default behavior when the count bucket \"",
"\"field is defined\"",
")",
"endpoint",
"=",
"change_to_count_endpoint",
"(",
"config_dict",
".",
"get",
"(",
"\"endpoint\"",
")",
")",
"else",
":",
"endpoint",
"=",
"config_dict",
".",
"get",
"(",
"\"endpoint\"",
")",
"def",
"intify",
"(",
"arg",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"int",
")",
"and",
"arg",
"is",
"not",
"None",
":",
"return",
"int",
"(",
"arg",
")",
"else",
":",
"return",
"arg",
"# this parameter comes in as a string when it's parsed",
"results_per_call",
"=",
"intify",
"(",
"config_dict",
".",
"get",
"(",
"\"results_per_call\"",
",",
"None",
")",
")",
"rule",
"=",
"gen_rule_payload",
"(",
"pt_rule",
"=",
"config_dict",
"[",
"\"pt_rule\"",
"]",
",",
"from_date",
"=",
"config_dict",
".",
"get",
"(",
"\"from_date\"",
",",
"None",
")",
",",
"to_date",
"=",
"config_dict",
".",
"get",
"(",
"\"to_date\"",
",",
"None",
")",
",",
"results_per_call",
"=",
"results_per_call",
",",
"count_bucket",
"=",
"config_dict",
".",
"get",
"(",
"\"count_bucket\"",
",",
"None",
")",
")",
"_dict",
"=",
"{",
"\"endpoint\"",
":",
"endpoint",
",",
"\"username\"",
":",
"config_dict",
".",
"get",
"(",
"\"username\"",
")",
",",
"\"password\"",
":",
"config_dict",
".",
"get",
"(",
"\"password\"",
")",
",",
"\"bearer_token\"",
":",
"config_dict",
".",
"get",
"(",
"\"bearer_token\"",
")",
",",
"\"extra_headers_dict\"",
":",
"config_dict",
".",
"get",
"(",
"\"extra_headers_dict\"",
",",
"None",
")",
",",
"\"rule_payload\"",
":",
"rule",
",",
"\"results_per_file\"",
":",
"intify",
"(",
"config_dict",
".",
"get",
"(",
"\"results_per_file\"",
")",
")",
",",
"\"max_results\"",
":",
"intify",
"(",
"config_dict",
".",
"get",
"(",
"\"max_results\"",
")",
")",
",",
"\"max_pages\"",
":",
"intify",
"(",
"config_dict",
".",
"get",
"(",
"\"max_pages\"",
",",
"None",
")",
")",
"}",
"return",
"_dict"
] | Generates parameters for a ResultStream from a dictionary. | [
"Generates",
"parameters",
"for",
"a",
"ResultStream",
"from",
"a",
"dictionary",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/api_utils.py#L141-L179 | train | 237,316 |
twitterdev/search-tweets-python | searchtweets/api_utils.py | infer_endpoint | def infer_endpoint(rule_payload):
"""
Infer which endpoint should be used for a given rule payload.
"""
bucket = (rule_payload if isinstance(rule_payload, dict)
else json.loads(rule_payload)).get("bucket")
return "counts" if bucket else "search" | python | def infer_endpoint(rule_payload):
"""
Infer which endpoint should be used for a given rule payload.
"""
bucket = (rule_payload if isinstance(rule_payload, dict)
else json.loads(rule_payload)).get("bucket")
return "counts" if bucket else "search" | [
"def",
"infer_endpoint",
"(",
"rule_payload",
")",
":",
"bucket",
"=",
"(",
"rule_payload",
"if",
"isinstance",
"(",
"rule_payload",
",",
"dict",
")",
"else",
"json",
".",
"loads",
"(",
"rule_payload",
")",
")",
".",
"get",
"(",
"\"bucket\"",
")",
"return",
"\"counts\"",
"if",
"bucket",
"else",
"\"search\""
] | Infer which endpoint should be used for a given rule payload. | [
"Infer",
"which",
"endpoint",
"should",
"be",
"used",
"for",
"a",
"given",
"rule",
"payload",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/api_utils.py#L182-L188 | train | 237,317 |
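`infer_endpoint` accepts either a dict or a JSON string:

```python
print(infer_endpoint({"query": "snow", "bucket": "day"}))  # counts
print(infer_endpoint('{"query": "snow"}'))                 # search
```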
twitterdev/search-tweets-python | searchtweets/api_utils.py | validate_count_api | def validate_count_api(rule_payload, endpoint):
"""
Ensures that the counts api is set correctly in a payload.
"""
rule = (rule_payload if isinstance(rule_payload, dict)
else json.loads(rule_payload))
bucket = rule.get('bucket')
counts = set(endpoint.split("/")) & {"counts.json"}
if len(counts) == 0:
if bucket is not None:
msg = ("""There is a count bucket present in your payload,
but you are not using the counts API.
Please check your endpoints and try again""")
logger.error(msg)
raise ValueError | python | def validate_count_api(rule_payload, endpoint):
"""
Ensures that the counts api is set correctly in a payload.
"""
rule = (rule_payload if isinstance(rule_payload, dict)
else json.loads(rule_payload))
bucket = rule.get('bucket')
counts = set(endpoint.split("/")) & {"counts.json"}
if len(counts) == 0:
if bucket is not None:
msg = ("""There is a count bucket present in your payload,
but you are not using the counts API.
Please check your endpoints and try again""")
logger.error(msg)
raise ValueError | [
"def",
"validate_count_api",
"(",
"rule_payload",
",",
"endpoint",
")",
":",
"rule",
"=",
"(",
"rule_payload",
"if",
"isinstance",
"(",
"rule_payload",
",",
"dict",
")",
"else",
"json",
".",
"loads",
"(",
"rule_payload",
")",
")",
"bucket",
"=",
"rule",
".",
"get",
"(",
"'bucket'",
")",
"counts",
"=",
"set",
"(",
"endpoint",
".",
"split",
"(",
"\"/\"",
")",
")",
"&",
"{",
"\"counts.json\"",
"}",
"if",
"len",
"(",
"counts",
")",
"==",
"0",
":",
"if",
"bucket",
"is",
"not",
"None",
":",
"msg",
"=",
"(",
"\"\"\"There is a count bucket present in your payload,\n but you are using not using the counts API.\n Please check your endpoints and try again\"\"\"",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"ValueError"
] | Ensures that the counts api is set correctly in a payload. | [
"Ensures",
"that",
"the",
"counts",
"api",
"is",
"set",
"correctly",
"in",
"a",
"payload",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/api_utils.py#L191-L205 | train | 237,318 |
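A hedged usage sketch: the endpoint URL below is hypothetical, but any endpoint path lacking a counts.json segment triggers the ValueError when the rule carries a count bucket.

from searchtweets.api_utils import validate_count_api  # assumes the package is installed

rule = {"query": "snow", "bucket": "day"}            # counts-style rule
endpoint = "https://example.com/search/tweets.json"  # hypothetical search endpoint
validate_count_api(rule, endpoint)                   # logs the message above, raises ValueError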
twitterdev/search-tweets-python | searchtweets/utils.py | partition | def partition(iterable, chunk_size, pad_none=False):
"""adapted from Toolz. Breaks an iterable into n iterables up to the
certain chunk size, padding with Nones if availble.
Example:
>>> from searchtweets.utils import partition
>>> iter_ = range(10)
>>> list(partition(iter_, 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
>>> list(partition(iter_, 3, pad_none=True))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)]
"""
args = [iter(iterable)] * chunk_size
if not pad_none:
return zip(*args)
else:
return it.zip_longest(*args) | python | def partition(iterable, chunk_size, pad_none=False):
"""adapted from Toolz. Breaks an iterable into n iterables up to the
certain chunk size, padding with Nones if availble.
Example:
>>> from searchtweets.utils import partition
>>> iter_ = range(10)
>>> list(partition(iter_, 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
>>> list(partition(iter_, 3, pad_none=True))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)]
"""
args = [iter(iterable)] * chunk_size
if not pad_none:
return zip(*args)
else:
return it.zip_longest(*args) | [
"def",
"partition",
"(",
"iterable",
",",
"chunk_size",
",",
"pad_none",
"=",
"False",
")",
":",
"args",
"=",
"[",
"iter",
"(",
"iterable",
")",
"]",
"*",
"chunk_size",
"if",
"not",
"pad_none",
":",
"return",
"zip",
"(",
"*",
"args",
")",
"else",
":",
"return",
"it",
".",
"zip_longest",
"(",
"*",
"args",
")"
] | adapted from Toolz. Breaks an iterable into n iterables up to a
certain chunk size, padding with Nones if available.
Example:
>>> from searchtweets.utils import partition
>>> iter_ = range(10)
>>> list(partition(iter_, 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
>>> list(partition(iter_, 3, pad_none=True))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)] | [
"adapted",
"from",
"Toolz",
".",
"Breaks",
"an",
"iterable",
"into",
"n",
"iterables",
"up",
"to",
"the",
"certain",
"chunk",
"size",
"padding",
"with",
"Nones",
"if",
"availble",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/utils.py#L41-L57 | train | 237,319 |
twitterdev/search-tweets-python | searchtweets/utils.py | write_ndjson | def write_ndjson(filename, data_iterable, append=False, **kwargs):
"""
Generator that writes newline-delimited json to a file and returns items
from an iterable.
"""
write_mode = "ab" if append else "wb"
logger.info("writing to file {}".format(filename))
with codecs.open(filename, write_mode, "utf-8") as outfile:
for item in data_iterable:
outfile.write(json.dumps(item) + "\n")
yield item | python | def write_ndjson(filename, data_iterable, append=False, **kwargs):
"""
Generator that writes newline-delimited json to a file and returns items
from an iterable.
"""
write_mode = "ab" if append else "wb"
logger.info("writing to file {}".format(filename))
with codecs.open(filename, write_mode, "utf-8") as outfile:
for item in data_iterable:
outfile.write(json.dumps(item) + "\n")
yield item | [
"def",
"write_ndjson",
"(",
"filename",
",",
"data_iterable",
",",
"append",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"write_mode",
"=",
"\"ab\"",
"if",
"append",
"else",
"\"wb\"",
"logger",
".",
"info",
"(",
"\"writing to file {}\"",
".",
"format",
"(",
"filename",
")",
")",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"write_mode",
",",
"\"utf-8\"",
")",
"as",
"outfile",
":",
"for",
"item",
"in",
"data_iterable",
":",
"outfile",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"item",
")",
"+",
"\"\\n\"",
")",
"yield",
"item"
] | Generator that writes newline-delimited json to a file and returns items
from an iterable. | [
"Generator",
"that",
"writes",
"newline",
"-",
"delimited",
"json",
"to",
"a",
"file",
"and",
"returns",
"items",
"from",
"an",
"iterable",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/utils.py#L87-L97 | train | 237,320 |
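Because write_ndjson is a generator, nothing is written until it is consumed; a minimal sketch, assuming the function is importable from searchtweets.utils:

from searchtweets.utils import write_ndjson  # assumes the package is installed

records = [{"id": 1}, {"id": 2}]
# draining the generator writes one JSON line per record and yields each back
written = list(write_ndjson("results.json", records))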
twitterdev/search-tweets-python | searchtweets/utils.py | write_result_stream | def write_result_stream(result_stream, filename_prefix=None,
results_per_file=None, **kwargs):
"""
Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYYY-mm-ddTHH_MM_SS.json``.
"""
if isinstance(result_stream, types.GeneratorType):
stream = result_stream
else:
stream = result_stream.stream()
file_time_formatter = "%Y-%m-%dT%H_%M_%S"
if filename_prefix is None:
filename_prefix = "twitter_search_results"
if results_per_file:
logger.info("chunking result stream to files with {} tweets per file"
.format(results_per_file))
chunked_stream = partition(stream, results_per_file, pad_none=True)
for chunk in chunked_stream:
chunk = filter(lambda x: x is not None, chunk)
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}_{}.json".format(filename_prefix, curr_datetime)
yield from write_ndjson(_filename, chunk)
else:
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}.json".format(filename_prefix)
yield from write_ndjson(_filename, stream) | python | def write_result_stream(result_stream, filename_prefix=None,
results_per_file=None, **kwargs):
"""
Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYYY-mm-ddTHH_MM_SS.json``.
"""
if isinstance(result_stream, types.GeneratorType):
stream = result_stream
else:
stream = result_stream.stream()
file_time_formatter = "%Y-%m-%dT%H_%M_%S"
if filename_prefix is None:
filename_prefix = "twitter_search_results"
if results_per_file:
logger.info("chunking result stream to files with {} tweets per file"
.format(results_per_file))
chunked_stream = partition(stream, results_per_file, pad_none=True)
for chunk in chunked_stream:
chunk = filter(lambda x: x is not None, chunk)
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}_{}.json".format(filename_prefix, curr_datetime)
yield from write_ndjson(_filename, chunk)
else:
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}.json".format(filename_prefix)
yield from write_ndjson(_filename, stream) | [
"def",
"write_result_stream",
"(",
"result_stream",
",",
"filename_prefix",
"=",
"None",
",",
"results_per_file",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"result_stream",
",",
"types",
".",
"GeneratorType",
")",
":",
"stream",
"=",
"result_stream",
"else",
":",
"stream",
"=",
"result_stream",
".",
"stream",
"(",
")",
"file_time_formatter",
"=",
"\"%Y-%m-%dT%H_%M_%S\"",
"if",
"filename_prefix",
"is",
"None",
":",
"filename_prefix",
"=",
"\"twitter_search_results\"",
"if",
"results_per_file",
":",
"logger",
".",
"info",
"(",
"\"chunking result stream to files with {} tweets per file\"",
".",
"format",
"(",
"results_per_file",
")",
")",
"chunked_stream",
"=",
"partition",
"(",
"stream",
",",
"results_per_file",
",",
"pad_none",
"=",
"True",
")",
"for",
"chunk",
"in",
"chunked_stream",
":",
"chunk",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
"is",
"not",
"None",
",",
"chunk",
")",
"curr_datetime",
"=",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"file_time_formatter",
")",
")",
"_filename",
"=",
"\"{}_{}.json\"",
".",
"format",
"(",
"filename_prefix",
",",
"curr_datetime",
")",
"yield",
"from",
"write_ndjson",
"(",
"_filename",
",",
"chunk",
")",
"else",
":",
"curr_datetime",
"=",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"file_time_formatter",
")",
")",
"_filename",
"=",
"\"{}.json\"",
".",
"format",
"(",
"filename_prefix",
")",
"yield",
"from",
"write_ndjson",
"(",
"_filename",
",",
"stream",
")"
] | Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYYY-mm-ddTHH_MM_SS.json``.
"Wraps",
"a",
"ResultStream",
"object",
"to",
"save",
"it",
"to",
"a",
"file",
".",
"This",
"function",
"will",
"still",
"return",
"all",
"data",
"from",
"the",
"result",
"stream",
"as",
"a",
"generator",
"that",
"wraps",
"the",
"write_ndjson",
"method",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/utils.py#L100-L140 | train | 237,321 |
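A hedged sketch of the chunking behaviour; a plain generator stands in for a ResultStream, which the isinstance check above explicitly allows:

from searchtweets.utils import write_result_stream  # assumes the package is installed

tweets = iter([{"id": n} for n in range(5)])
# writes snow_<UTC timestamp>.json files of at most 2 tweets each,
# while still yielding every tweet back to the caller
collected = list(write_result_stream(tweets, filename_prefix="snow",
                                     results_per_file=2))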
twitterdev/search-tweets-python | searchtweets/credentials.py | _load_yaml_credentials | def _load_yaml_credentials(filename=None, yaml_key=None):
"""Loads and parses credentials in a YAML file. Catches common exceptions
and returns an empty dict on error, which will be handled downstream.
Returns:
dict: parsed credentials or {}
"""
try:
with open(os.path.expanduser(filename)) as f:
search_creds = yaml.safe_load(f)[yaml_key]
except FileNotFoundError:
logger.error("cannot read file {}".format(filename))
search_creds = {}
except KeyError:
logger.error("{} is missing the provided key: {}"
.format(filename, yaml_key))
search_creds = {}
return search_creds | python | def _load_yaml_credentials(filename=None, yaml_key=None):
"""Loads and parses credentials in a YAML file. Catches common exceptions
and returns an empty dict on error, which will be handled downstream.
Returns:
dict: parsed credentials or {}
"""
try:
with open(os.path.expanduser(filename)) as f:
search_creds = yaml.safe_load(f)[yaml_key]
except FileNotFoundError:
logger.error("cannot read file {}".format(filename))
search_creds = {}
except KeyError:
logger.error("{} is missing the provided key: {}"
.format(filename, yaml_key))
search_creds = {}
return search_creds | [
"def",
"_load_yaml_credentials",
"(",
"filename",
"=",
"None",
",",
"yaml_key",
"=",
"None",
")",
":",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
")",
"as",
"f",
":",
"search_creds",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
")",
"[",
"yaml_key",
"]",
"except",
"FileNotFoundError",
":",
"logger",
".",
"error",
"(",
"\"cannot read file {}\"",
".",
"format",
"(",
"filename",
")",
")",
"search_creds",
"=",
"{",
"}",
"except",
"KeyError",
":",
"logger",
".",
"error",
"(",
"\"{} is missing the provided key: {}\"",
".",
"format",
"(",
"filename",
",",
"yaml_key",
")",
")",
"search_creds",
"=",
"{",
"}",
"return",
"search_creds"
] | Loads and parses credentials in a YAML file. Catches common exceptions
and returns an empty dict on error, which will be handled downstream.
Returns:
dict: parsed credentials or {} | [
"Loads",
"and",
"parses",
"credentials",
"in",
"a",
"YAML",
"file",
".",
"Catches",
"common",
"exceptions",
"and",
"returns",
"an",
"empty",
"dict",
"on",
"error",
"which",
"will",
"be",
"handled",
"downstream",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/credentials.py#L25-L43 | train | 237,322 |
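For illustration, a hypothetical credential file and call; the key name and field names below are assumptions, not values mandated by the function itself:

# ~/.twitter_keys.yaml (hypothetical layout)
#
# search_tweets_api:
#   endpoint: <your endpoint URL>
#   consumer_key: <your key>
#   consumer_secret: <your secret>

from searchtweets.credentials import _load_yaml_credentials  # private helper, shown for illustration

creds = _load_yaml_credentials(filename="~/.twitter_keys.yaml",
                               yaml_key="search_tweets_api")
if not creds:
    raise SystemExit("no credentials loaded")  # both error paths above return {}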
twitterdev/search-tweets-python | searchtweets/credentials.py | _generate_bearer_token | def _generate_bearer_token(consumer_key, consumer_secret):
"""
Return the bearer token for a given pair of consumer key and secret values.
"""
data = [('grant_type', 'client_credentials')]
resp = requests.post(OAUTH_ENDPOINT,
data=data,
auth=(consumer_key, consumer_secret))
logger.warning("Grabbing bearer token from OAUTH")
if resp.status_code >= 400:
logger.error(resp.text)
resp.raise_for_status()
return resp.json()['access_token'] | python | def _generate_bearer_token(consumer_key, consumer_secret):
"""
Return the bearer token for a given pair of consumer key and secret values.
"""
data = [('grant_type', 'client_credentials')]
resp = requests.post(OAUTH_ENDPOINT,
data=data,
auth=(consumer_key, consumer_secret))
logger.warning("Grabbing bearer token from OAUTH")
if resp.status_code >= 400:
logger.error(resp.text)
resp.raise_for_status()
return resp.json()['access_token'] | [
"def",
"_generate_bearer_token",
"(",
"consumer_key",
",",
"consumer_secret",
")",
":",
"data",
"=",
"[",
"(",
"'grant_type'",
",",
"'client_credentials'",
")",
"]",
"resp",
"=",
"requests",
".",
"post",
"(",
"OAUTH_ENDPOINT",
",",
"data",
"=",
"data",
",",
"auth",
"=",
"(",
"consumer_key",
",",
"consumer_secret",
")",
")",
"logger",
".",
"warning",
"(",
"\"Grabbing bearer token from OAUTH\"",
")",
"if",
"resp",
".",
"status_code",
">=",
"400",
":",
"logger",
".",
"error",
"(",
"resp",
".",
"text",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"resp",
".",
"json",
"(",
")",
"[",
"'access_token'",
"]"
] | Return the bearer token for a given pair of consumer key and secret values. | [
"Return",
"the",
"bearer",
"token",
"for",
"a",
"given",
"pair",
"of",
"consumer",
"key",
"and",
"secret",
"values",
"."
] | 7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5 | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/credentials.py#L193-L206 | train | 237,323 |
kvesteri/validators | validators/i18n/fi.py | fi_business_id | def fi_business_id(business_id):
"""
Validate a Finnish Business ID.
Each company in Finland has a distinct business id. For more
information see `Finnish Trade Register`_
.. _Finnish Trade Register:
http://en.wikipedia.org/wiki/Finnish_Trade_Register
Examples::
>>> fi_business_id('0112038-9') # Fast Monkeys Ltd
True
>>> fi_business_id('1234567-8') # Bogus ID
ValidationFailure(func=fi_business_id, ...)
.. versionadded:: 0.4
.. versionchanged:: 0.5
Method renamed from ``finnish_business_id`` to ``fi_business_id``
:param business_id: business_id to validate
"""
if not business_id or not re.match(business_id_pattern, business_id):
return False
factors = [7, 9, 10, 5, 8, 4, 2]
numbers = map(int, business_id[:7])
checksum = int(business_id[8])
sum_ = sum(f * n for f, n in zip(factors, numbers))
modulo = sum_ % 11
return (11 - modulo == checksum) or (modulo == 0 and checksum == 0) | python | def fi_business_id(business_id):
"""
Validate a Finnish Business ID.
Each company in Finland has a distinct business id. For more
information see `Finnish Trade Register`_
.. _Finnish Trade Register:
http://en.wikipedia.org/wiki/Finnish_Trade_Register
Examples::
>>> fi_business_id('0112038-9') # Fast Monkeys Ltd
True
>>> fi_business_id('1234567-8') # Bogus ID
ValidationFailure(func=fi_business_id, ...)
.. versionadded:: 0.4
.. versionchanged:: 0.5
Method renamed from ``finnish_business_id`` to ``fi_business_id``
:param business_id: business_id to validate
"""
if not business_id or not re.match(business_id_pattern, business_id):
return False
factors = [7, 9, 10, 5, 8, 4, 2]
numbers = map(int, business_id[:7])
checksum = int(business_id[8])
sum_ = sum(f * n for f, n in zip(factors, numbers))
modulo = sum_ % 11
return (11 - modulo == checksum) or (modulo == 0 and checksum == 0) | [
"def",
"fi_business_id",
"(",
"business_id",
")",
":",
"if",
"not",
"business_id",
"or",
"not",
"re",
".",
"match",
"(",
"business_id_pattern",
",",
"business_id",
")",
":",
"return",
"False",
"factors",
"=",
"[",
"7",
",",
"9",
",",
"10",
",",
"5",
",",
"8",
",",
"4",
",",
"2",
"]",
"numbers",
"=",
"map",
"(",
"int",
",",
"business_id",
"[",
":",
"7",
"]",
")",
"checksum",
"=",
"int",
"(",
"business_id",
"[",
"8",
"]",
")",
"sum_",
"=",
"sum",
"(",
"f",
"*",
"n",
"for",
"f",
",",
"n",
"in",
"zip",
"(",
"factors",
",",
"numbers",
")",
")",
"modulo",
"=",
"sum_",
"%",
"11",
"return",
"(",
"11",
"-",
"modulo",
"==",
"checksum",
")",
"or",
"(",
"modulo",
"==",
"0",
"and",
"checksum",
"==",
"0",
")"
] | Validate a Finnish Business ID.
Each company in Finland has a distinct business id. For more
information see `Finnish Trade Register`_
.. _Finnish Trade Register:
http://en.wikipedia.org/wiki/Finnish_Trade_Register
Examples::
>>> fi_business_id('0112038-9') # Fast Monkeys Ltd
True
>>> fi_business_id('1234567-8') # Bogus ID
ValidationFailure(func=fi_business_id, ...)
.. versionadded:: 0.4
.. versionchanged:: 0.5
Method renamed from ``finnish_business_id`` to ``fi_business_id``
:param business_id: business_id to validate | [
"Validate",
"a",
"Finnish",
"Business",
"ID",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/i18n/fi.py#L20-L51 | train | 237,324 |
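The weighted mod-11 check is easy to verify by hand; a standalone sketch for the documented example '0112038-9' (the regex gate is skipped for brevity):

factors = [7, 9, 10, 5, 8, 4, 2]
business_id = "0112038-9"
numbers = [int(c) for c in business_id[:7]]           # 0, 1, 1, 2, 0, 3, 8
check = int(business_id[8])                           # 9
total = sum(f * n for f, n in zip(factors, numbers))  # 9 + 10 + 10 + 12 + 16 = 57
assert 11 - total % 11 == check                       # 57 % 11 == 2 and 11 - 2 == 9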
kvesteri/validators | validators/i18n/fi.py | fi_ssn | def fi_ssn(ssn, allow_temporal_ssn=True):
"""
Validate a Finnish Social Security Number.
This validator is based on `django-localflavor-fi`_.
.. _django-localflavor-fi:
https://github.com/django/django-localflavor-fi/
Examples::
>>> fi_ssn('010101-0101')
True
>>> fi_ssn('101010-0102')
ValidationFailure(func=fi_ssn, args=...)
.. versionadded:: 0.5
:param ssn: Social Security Number to validate
:param allow_temporal_ssn:
Whether to accept temporal SSN numbers. Temporal SSN numbers are the
ones where the serial is in the range [900-999]. By default temporal
SSN numbers are valid.
"""
if not ssn:
return False
result = re.match(ssn_pattern, ssn)
if not result:
return False
gd = result.groupdict()
checksum = int(gd['date'] + gd['serial'])
return (
int(gd['serial']) >= 2 and
(allow_temporal_ssn or int(gd['serial']) <= 899) and
ssn_checkmarks[checksum % len(ssn_checkmarks)] ==
gd['checksum']
) | python | def fi_ssn(ssn, allow_temporal_ssn=True):
"""
Validate a Finnish Social Security Number.
This validator is based on `django-localflavor-fi`_.
.. _django-localflavor-fi:
https://github.com/django/django-localflavor-fi/
Examples::
>>> fi_ssn('010101-0101')
True
>>> fi_ssn('101010-0102')
ValidationFailure(func=fi_ssn, args=...)
.. versionadded:: 0.5
:param ssn: Social Security Number to validate
:param allow_temporal_ssn:
Whether to accept temporal SSN numbers. Temporal SSN numbers are the
ones where the serial is in the range [900-999]. By default temporal
SSN numbers are valid.
"""
if not ssn:
return False
result = re.match(ssn_pattern, ssn)
if not result:
return False
gd = result.groupdict()
checksum = int(gd['date'] + gd['serial'])
return (
int(gd['serial']) >= 2 and
(allow_temporal_ssn or int(gd['serial']) <= 899) and
ssn_checkmarks[checksum % len(ssn_checkmarks)] ==
gd['checksum']
) | [
"def",
"fi_ssn",
"(",
"ssn",
",",
"allow_temporal_ssn",
"=",
"True",
")",
":",
"if",
"not",
"ssn",
":",
"return",
"False",
"result",
"=",
"re",
".",
"match",
"(",
"ssn_pattern",
",",
"ssn",
")",
"if",
"not",
"result",
":",
"return",
"False",
"gd",
"=",
"result",
".",
"groupdict",
"(",
")",
"checksum",
"=",
"int",
"(",
"gd",
"[",
"'date'",
"]",
"+",
"gd",
"[",
"'serial'",
"]",
")",
"return",
"(",
"int",
"(",
"gd",
"[",
"'serial'",
"]",
")",
">=",
"2",
"and",
"(",
"allow_temporal_ssn",
"or",
"int",
"(",
"gd",
"[",
"'serial'",
"]",
")",
"<=",
"899",
")",
"and",
"ssn_checkmarks",
"[",
"checksum",
"%",
"len",
"(",
"ssn_checkmarks",
")",
"]",
"==",
"gd",
"[",
"'checksum'",
"]",
")"
] | Validate a Finnish Social Security Number.
This validator is based on `django-localflavor-fi`_.
.. _django-localflavor-fi:
https://github.com/django/django-localflavor-fi/
Examples::
>>> fi_ssn('010101-0101')
True
>>> fi_ssn('101010-0102')
ValidationFailure(func=fi_ssn, args=...)
.. versionadded:: 0.5
:param ssn: Social Security Number to validate
:param allow_temporal_ssn:
Whether to accept temporal SSN numbers. Temporal SSN numbers are the
ones where the serial is in the range [900-999]. By default temporal
SSN numbers are valid. | [
"Validate",
"a",
"Finnish",
"Social",
"Security",
"Number",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/i18n/fi.py#L55-L94 | train | 237,325 |
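A standalone sketch of the check-character math for the documented example '010101-0101'; ssn_checkmarks is defined elsewhere in the module, so the alphabet below is an assumption based on the standard Finnish scheme:

ssn_checkmarks = "0123456789ABCDEFHJKLMNPRSTUVWXY"  # assumed module constant
date, serial, mark = "010101", "010", "1"           # pieces of '010101-0101'
assert ssn_checkmarks[int(date + serial) % len(ssn_checkmarks)] == mark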
kvesteri/validators | validators/iban.py | modcheck | def modcheck(value):
"""Check if the value string passes the mod97-test.
"""
# move country code and check numbers to end
rearranged = value[4:] + value[:4]
# convert letters to numbers
converted = [char_value(char) for char in rearranged]
# interpret as integer
integerized = int(''.join([str(i) for i in converted]))
return (integerized % 97 == 1) | python | def modcheck(value):
"""Check if the value string passes the mod97-test.
"""
# move country code and check numbers to end
rearranged = value[4:] + value[:4]
# convert letters to numbers
converted = [char_value(char) for char in rearranged]
# interpret as integer
integerized = int(''.join([str(i) for i in converted]))
return (integerized % 97 == 1) | [
"def",
"modcheck",
"(",
"value",
")",
":",
"# move country code and check numbers to end",
"rearranged",
"=",
"value",
"[",
"4",
":",
"]",
"+",
"value",
"[",
":",
"4",
"]",
"# convert letters to numbers",
"converted",
"=",
"[",
"char_value",
"(",
"char",
")",
"for",
"char",
"in",
"rearranged",
"]",
"# interpret as integer",
"integerized",
"=",
"int",
"(",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"converted",
"]",
")",
")",
"return",
"(",
"integerized",
"%",
"97",
"==",
"1",
")"
] | Check if the value string passes the mod97-test. | [
"Check",
"if",
"the",
"value",
"string",
"passes",
"the",
"mod97",
"-",
"test",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/iban.py#L20-L29 | train | 237,326 |
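char_value is defined elsewhere in the module; assuming it maps digits to themselves and 'A'-'Z' to 10-35 (the IBAN convention), the mod-97 test can be sketched standalone against a textbook example IBAN:

def char_value(char):
    # assumed behaviour: digits map to themselves, letters A-Z to 10..35
    return int(char) if char.isdigit() else ord(char.upper()) - 55

iban = "GB82WEST12345698765432"      # well-known example IBAN
rearranged = iban[4:] + iban[:4]     # move country code + check digits to the end
integerized = int("".join(str(char_value(c)) for c in rearranged))
assert integerized % 97 == 1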
kvesteri/validators | validators/utils.py | func_args_as_dict | def func_args_as_dict(func, args, kwargs):
"""
Return given function's positional and key value arguments as an ordered
dictionary.
"""
if six.PY2:
_getargspec = inspect.getargspec
else:
_getargspec = inspect.getfullargspec
arg_names = list(
OrderedDict.fromkeys(
itertools.chain(
_getargspec(func)[0],
kwargs.keys()
)
)
)
return OrderedDict(
list(six.moves.zip(arg_names, args)) +
list(kwargs.items())
) | python | def func_args_as_dict(func, args, kwargs):
"""
Return given function's positional and key value arguments as an ordered
dictionary.
"""
if six.PY2:
_getargspec = inspect.getargspec
else:
_getargspec = inspect.getfullargspec
arg_names = list(
OrderedDict.fromkeys(
itertools.chain(
_getargspec(func)[0],
kwargs.keys()
)
)
)
return OrderedDict(
list(six.moves.zip(arg_names, args)) +
list(kwargs.items())
) | [
"def",
"func_args_as_dict",
"(",
"func",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"six",
".",
"PY2",
":",
"_getargspec",
"=",
"inspect",
".",
"getargspec",
"else",
":",
"_getargspec",
"=",
"inspect",
".",
"getfullargspec",
"arg_names",
"=",
"list",
"(",
"OrderedDict",
".",
"fromkeys",
"(",
"itertools",
".",
"chain",
"(",
"_getargspec",
"(",
"func",
")",
"[",
"0",
"]",
",",
"kwargs",
".",
"keys",
"(",
")",
")",
")",
")",
"return",
"OrderedDict",
"(",
"list",
"(",
"six",
".",
"moves",
".",
"zip",
"(",
"arg_names",
",",
"args",
")",
")",
"+",
"list",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
")"
] | Return given function's positional and key value arguments as an ordered
dictionary. | [
"Return",
"given",
"function",
"s",
"positional",
"and",
"key",
"value",
"arguments",
"as",
"an",
"ordered",
"dictionary",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/utils.py#L35-L56 | train | 237,327 |
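A small sketch of the merge order, using a hypothetical two-argument call; note that parameters that receive no value simply do not appear in the result:

from validators.utils import func_args_as_dict  # assumes the package is installed

def between(value, min=None, max=None):  # hypothetical target function
    return True

func_args_as_dict(between, (5,), {"max": 10})
# -> OrderedDict([('value', 5), ('max', 10)]); 'min' is absent because it
#    was passed neither positionally nor as a keyword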
kvesteri/validators | validators/utils.py | validator | def validator(func, *args, **kwargs):
"""
A decorator that makes the given function a validator.
Whenever the given function is called and returns a ``False`` value,
this decorator returns a :class:`ValidationFailure` object.
Example::
>>> @validator
... def even(value):
... return not (value % 2)
>>> even(4)
True
>>> even(5)
ValidationFailure(func=even, args={'value': 5})
:param func: function to decorate
:param args: positional function arguments
:param kwargs: key value function arguments
"""
def wrapper(func, *args, **kwargs):
value = func(*args, **kwargs)
if not value:
return ValidationFailure(
func, func_args_as_dict(func, args, kwargs)
)
return True
return decorator(wrapper, func) | python | def validator(func, *args, **kwargs):
"""
A decorator that makes the given function a validator.
Whenever the given function is called and returns a ``False`` value,
this decorator returns a :class:`ValidationFailure` object.
Example::
>>> @validator
... def even(value):
... return not (value % 2)
>>> even(4)
True
>>> even(5)
ValidationFailure(func=even, args={'value': 5})
:param func: function to decorate
:param args: positional function arguments
:param kwargs: key value function arguments
"""
def wrapper(func, *args, **kwargs):
value = func(*args, **kwargs)
if not value:
return ValidationFailure(
func, func_args_as_dict(func, args, kwargs)
)
return True
return decorator(wrapper, func) | [
"def",
"validator",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapper",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"value",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"value",
":",
"return",
"ValidationFailure",
"(",
"func",
",",
"func_args_as_dict",
"(",
"func",
",",
"args",
",",
"kwargs",
")",
")",
"return",
"True",
"return",
"decorator",
"(",
"wrapper",
",",
"func",
")"
] | A decorator that makes the given function a validator.
Whenever the given function is called and returns a ``False`` value,
this decorator returns a :class:`ValidationFailure` object.
Example::
>>> @validator
... def even(value):
... return not (value % 2)
>>> even(4)
True
>>> even(5)
ValidationFailure(func=even, args={'value': 5})
:param func: function to decorate
:param args: positional function arguments
:param kwargs: key value function arguments | [
"A",
"decorator",
"that",
"makes",
"given",
"function",
"validator",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/utils.py#L59-L89 | train | 237,328 |
kvesteri/validators | validators/length.py | length | def length(value, min=None, max=None):
"""
Return whether or not the length of given string is within a specified
range.
Examples::
>>> length('something', min=2)
True
>>> length('something', min=9, max=9)
True
>>> length('something', max=5)
ValidationFailure(func=length, ...)
:param value:
The string to validate.
:param min:
The minimum required length of the string. If not provided, minimum
length will not be checked.
:param max:
The maximum length of the string. If not provided, maximum length
will not be checked.
.. versionadded:: 0.2
"""
if (min is not None and min < 0) or (max is not None and max < 0):
raise AssertionError(
'`min` and `max` need to be greater than zero.'
)
return between(len(value), min=min, max=max) | python | def length(value, min=None, max=None):
"""
Return whether or not the length of given string is within a specified
range.
Examples::
>>> length('something', min=2)
True
>>> length('something', min=9, max=9)
True
>>> length('something', max=5)
ValidationFailure(func=length, ...)
:param value:
The string to validate.
:param min:
The minimum required length of the string. If not provided, minimum
length will not be checked.
:param max:
The maximum length of the string. If not provided, maximum length
will not be checked.
.. versionadded:: 0.2
"""
if (min is not None and min < 0) or (max is not None and max < 0):
raise AssertionError(
'`min` and `max` need to be greater than zero.'
)
return between(len(value), min=min, max=max) | [
"def",
"length",
"(",
"value",
",",
"min",
"=",
"None",
",",
"max",
"=",
"None",
")",
":",
"if",
"(",
"min",
"is",
"not",
"None",
"and",
"min",
"<",
"0",
")",
"or",
"(",
"max",
"is",
"not",
"None",
"and",
"max",
"<",
"0",
")",
":",
"raise",
"AssertionError",
"(",
"'`min` and `max` need to be greater than zero.'",
")",
"return",
"between",
"(",
"len",
"(",
"value",
")",
",",
"min",
"=",
"min",
",",
"max",
"=",
"max",
")"
] | Return whether or not the length of given string is within a specified
range.
Examples::
>>> length('something', min=2)
True
>>> length('something', min=9, max=9)
True
>>> length('something', max=5)
ValidationFailure(func=length, ...)
:param value:
The string to validate.
:param min:
The minimum required length of the string. If not provided, minimum
length will not be checked.
:param max:
The maximum length of the string. If not provided, maximum length
will not be checked.
.. versionadded:: 0.2 | [
"Return",
"whether",
"or",
"not",
"the",
"length",
"of",
"given",
"string",
"is",
"within",
"a",
"specified",
"range",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/length.py#L6-L37 | train | 237,329 |
kvesteri/validators | validators/url.py | url | def url(value, public=False):
"""
Return whether or not given value is a valid URL.
If the value is valid URL this function returns ``True``, otherwise
:class:`~validators.utils.ValidationFailure`.
This validator is based on the wonderful `URL validator of dperini`_.
.. _URL validator of dperini:
https://gist.github.com/dperini/729294
Examples::
>>> url('http://foobar.dk')
True
>>> url('ftp://foobar.dk')
True
>>> url('http://10.0.0.1')
True
>>> url('http://foobar.d')
ValidationFailure(func=url, ...)
>>> url('http://10.0.0.1', public=True)
ValidationFailure(func=url, ...)
.. versionadded:: 0.2
.. versionchanged:: 0.10.2
Added support for various exotic URLs and fixed various false
positives.
.. versionchanged:: 0.10.3
Added ``public`` parameter.
.. versionchanged:: 0.11.0
Made the regular expression this function uses case insensitive.
.. versionchanged:: 0.11.3
Added support for URLs containing localhost
:param value: URL address string to validate
:param public: (default=False) Set True to only allow a public IP address
"""
result = pattern.match(value)
if not public:
return result
return result and not any(
(result.groupdict().get(key) for key in ('private_ip', 'private_host'))
) | python | def url(value, public=False):
"""
Return whether or not given value is a valid URL.
If the value is valid URL this function returns ``True``, otherwise
:class:`~validators.utils.ValidationFailure`.
This validator is based on the wonderful `URL validator of dperini`_.
.. _URL validator of dperini:
https://gist.github.com/dperini/729294
Examples::
>>> url('http://foobar.dk')
True
>>> url('ftp://foobar.dk')
True
>>> url('http://10.0.0.1')
True
>>> url('http://foobar.d')
ValidationFailure(func=url, ...)
>>> url('http://10.0.0.1', public=True)
ValidationFailure(func=url, ...)
.. versionadded:: 0.2
.. versionchanged:: 0.10.2
Added support for various exotic URLs and fixed various false
positives.
.. versionchanged:: 0.10.3
Added ``public`` parameter.
.. versionchanged:: 0.11.0
Made the regular expression this function uses case insensitive.
.. versionchanged:: 0.11.3
Added support for URLs containing localhost
:param value: URL address string to validate
:param public: (default=False) Set True to only allow a public IP address
"""
result = pattern.match(value)
if not public:
return result
return result and not any(
(result.groupdict().get(key) for key in ('private_ip', 'private_host'))
) | [
"def",
"url",
"(",
"value",
",",
"public",
"=",
"False",
")",
":",
"result",
"=",
"pattern",
".",
"match",
"(",
"value",
")",
"if",
"not",
"public",
":",
"return",
"result",
"return",
"result",
"and",
"not",
"any",
"(",
"(",
"result",
".",
"groupdict",
"(",
")",
".",
"get",
"(",
"key",
")",
"for",
"key",
"in",
"(",
"'private_ip'",
",",
"'private_host'",
")",
")",
")"
] | Return whether or not given value is a valid URL.
If the value is valid URL this function returns ``True``, otherwise
:class:`~validators.utils.ValidationFailure`.
This validator is based on the wonderful `URL validator of dperini`_.
.. _URL validator of dperini:
https://gist.github.com/dperini/729294
Examples::
>>> url('http://foobar.dk')
True
>>> url('ftp://foobar.dk')
True
>>> url('http://10.0.0.1')
True
>>> url('http://foobar.d')
ValidationFailure(func=url, ...)
>>> url('http://10.0.0.1', public=True)
ValidationFailure(func=url, ...)
.. versionadded:: 0.2
.. versionchanged:: 0.10.2
Added support for various exotic URLs and fixed various false
positives.
.. versionchanged:: 0.10.3
Added ``public`` parameter.
.. versionchanged:: 0.11.0
Made the regular expression this function uses case insensitive.
.. versionchanged:: 0.11.3
Added support for URLs containing localhost
:param value: URL address string to validate
:param public: (default=False) Set True to only allow a public IP address | [
"Return",
"whether",
"or",
"not",
"given",
"value",
"is",
"a",
"valid",
"URL",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/url.py#L94-L151 | train | 237,330 |
kvesteri/validators | validators/ip_address.py | ipv4 | def ipv4(value):
"""
Return whether or not given value is a valid IP version 4 address.
This validator is based on `WTForms IPAddress validator`_
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv4('123.0.0.7')
True
>>> ipv4('900.80.70.11')
ValidationFailure(func=ipv4, args={'value': '900.80.70.11'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
groups = value.split('.')
if len(groups) != 4 or any(not x.isdigit() for x in groups):
return False
return all(0 <= int(part) < 256 for part in groups) | python | def ipv4(value):
"""
Return whether or not given value is a valid IP version 4 address.
This validator is based on `WTForms IPAddress validator`_
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv4('123.0.0.7')
True
>>> ipv4('900.80.70.11')
ValidationFailure(func=ipv4, args={'value': '900.80.70.11'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
groups = value.split('.')
if len(groups) != 4 or any(not x.isdigit() for x in groups):
return False
return all(0 <= int(part) < 256 for part in groups) | [
"def",
"ipv4",
"(",
"value",
")",
":",
"groups",
"=",
"value",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"groups",
")",
"!=",
"4",
"or",
"any",
"(",
"not",
"x",
".",
"isdigit",
"(",
")",
"for",
"x",
"in",
"groups",
")",
":",
"return",
"False",
"return",
"all",
"(",
"0",
"<=",
"int",
"(",
"part",
")",
"<",
"256",
"for",
"part",
"in",
"groups",
")"
] | Return whether or not given value is a valid IP version 4 address.
This validator is based on `WTForms IPAddress validator`_
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv4('123.0.0.7')
True
>>> ipv4('900.80.70.11')
ValidationFailure(func=ipv4, args={'value': '900.80.70.11'})
.. versionadded:: 0.2
:param value: IP address string to validate | [
"Return",
"whether",
"or",
"not",
"given",
"value",
"is",
"a",
"valid",
"IP",
"version",
"4",
"address",
"."
] | 34d355e87168241e872b25811d245810df2bd430 | https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/ip_address.py#L5-L29 | train | 237,331 |
eddyxu/cpp-coveralls | cpp_coveralls/report.py | post_report | def post_report(coverage, args):
"""Post coverage report to coveralls.io."""
response = requests.post(URL, files={'json_file': json.dumps(coverage)},
verify=(not args.skip_ssl_verify))
try:
result = response.json()
except ValueError:
result = {'error': 'Failure to submit data. '
'Response [%(status)s]: %(text)s' % {
'status': response.status_code,
'text': response.text}}
print(result)
if 'error' in result:
return result['error']
return 0 | python | def post_report(coverage, args):
"""Post coverage report to coveralls.io."""
response = requests.post(URL, files={'json_file': json.dumps(coverage)},
verify=(not args.skip_ssl_verify))
try:
result = response.json()
except ValueError:
result = {'error': 'Failure to submit data. '
'Response [%(status)s]: %(text)s' % {
'status': response.status_code,
'text': response.text}}
print(result)
if 'error' in result:
return result['error']
return 0 | [
"def",
"post_report",
"(",
"coverage",
",",
"args",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"URL",
",",
"files",
"=",
"{",
"'json_file'",
":",
"json",
".",
"dumps",
"(",
"coverage",
")",
"}",
",",
"verify",
"=",
"(",
"not",
"args",
".",
"skip_ssl_verify",
")",
")",
"try",
":",
"result",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"result",
"=",
"{",
"'error'",
":",
"'Failure to submit data. '",
"'Response [%(status)s]: %(text)s'",
"%",
"{",
"'status'",
":",
"response",
".",
"status_code",
",",
"'text'",
":",
"response",
".",
"text",
"}",
"}",
"print",
"(",
"result",
")",
"if",
"'error'",
"in",
"result",
":",
"return",
"result",
"[",
"'error'",
"]",
"return",
"0"
] | Post coverage report to coveralls.io. | [
"Post",
"coverage",
"report",
"to",
"coveralls",
".",
"io",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/report.py#L10-L24 | train | 237,332 |
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | is_source_file | def is_source_file(args, filepath):
"""Returns true if it is a C++ source file."""
if args.extension:
return os.path.splitext(filepath)[1] in args.extension
else:
return os.path.splitext(filepath)[1] in _CPP_EXTENSIONS | python | def is_source_file(args, filepath):
"""Returns true if it is a C++ source file."""
if args.extension:
return os.path.splitext(filepath)[1] in args.extension
else:
return os.path.splitext(filepath)[1] in _CPP_EXTENSIONS | [
"def",
"is_source_file",
"(",
"args",
",",
"filepath",
")",
":",
"if",
"args",
".",
"extension",
":",
"return",
"os",
".",
"path",
".",
"splitext",
"(",
"filepath",
")",
"[",
"1",
"]",
"in",
"args",
".",
"extension",
"else",
":",
"return",
"os",
".",
"path",
".",
"splitext",
"(",
"filepath",
")",
"[",
"1",
"]",
"in",
"_CPP_EXTENSIONS"
] | Returns true if it is a C++ source file. | [
"Returns",
"true",
"if",
"it",
"is",
"a",
"C",
"++",
"source",
"file",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L93-L98 | train | 237,333 |
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | exclude_paths | def exclude_paths(args):
"""Returns the absolute paths for excluded path."""
results = []
if args.exclude:
for excl_path in args.exclude:
results.append(os.path.abspath(os.path.join(args.root, excl_path)))
return results | python | def exclude_paths(args):
"""Returns the absolute paths for excluded path."""
results = []
if args.exclude:
for excl_path in args.exclude:
results.append(os.path.abspath(os.path.join(args.root, excl_path)))
return results | [
"def",
"exclude_paths",
"(",
"args",
")",
":",
"results",
"=",
"[",
"]",
"if",
"args",
".",
"exclude",
":",
"for",
"excl_path",
"in",
"args",
".",
"exclude",
":",
"results",
".",
"append",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"root",
",",
"excl_path",
")",
")",
")",
"return",
"results"
] | Returns the absolute paths for excluded path. | [
"Returns",
"the",
"absolute",
"paths",
"for",
"excluded",
"path",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L101-L107 | train | 237,334 |
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | create_exclude_rules | def create_exclude_rules(args):
"""Creates the exlude rules
"""
global _cached_exclude_rules
if _cached_exclude_rules is not None:
return _cached_exclude_rules
rules = []
for excl_path in args.exclude:
abspath = os.path.abspath(os.path.join(args.root, excl_path))
rules.append((abspath, True))
for incl_path in args.include:
abspath = os.path.abspath(os.path.join(args.root, incl_path))
rules.append((abspath, False))
_cached_exclude_rules = sorted(rules, key=lambda p: p[0])
return _cached_exclude_rules | python | def create_exclude_rules(args):
"""Creates the exlude rules
"""
global _cached_exclude_rules
if _cached_exclude_rules is not None:
return _cached_exclude_rules
rules = []
for excl_path in args.exclude:
abspath = os.path.abspath(os.path.join(args.root, excl_path))
rules.append((abspath, True))
for incl_path in args.include:
abspath = os.path.abspath(os.path.join(args.root, incl_path))
rules.append((abspath, False))
_cached_exclude_rules = sorted(rules, key=lambda p: p[0])
return _cached_exclude_rules | [
"def",
"create_exclude_rules",
"(",
"args",
")",
":",
"global",
"_cached_exclude_rules",
"if",
"_cached_exclude_rules",
"is",
"not",
"None",
":",
"return",
"_cached_exclude_rules",
"rules",
"=",
"[",
"]",
"for",
"excl_path",
"in",
"args",
".",
"exclude",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"root",
",",
"excl_path",
")",
")",
"rules",
".",
"append",
"(",
"(",
"abspath",
",",
"True",
")",
")",
"for",
"incl_path",
"in",
"args",
".",
"include",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"root",
",",
"incl_path",
")",
")",
"rules",
".",
"append",
"(",
"(",
"abspath",
",",
"False",
")",
")",
"_cached_exclude_rules",
"=",
"sorted",
"(",
"rules",
",",
"key",
"=",
"lambda",
"p",
":",
"p",
"[",
"0",
"]",
")",
"return",
"_cached_exclude_rules"
] | Creates the exclude rules | [
"Creates",
"the",
"exlude",
"rules"
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L113-L127 | train | 237,335 |
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | is_excluded_path | def is_excluded_path(args, filepath):
"""Returns true if the filepath is under the one of the exclude path."""
# Try regular expressions first.
for regexp_exclude_path in args.regexp:
if re.match(regexp_exclude_path, filepath):
return True
abspath = os.path.abspath(filepath)
if args.include:
# If the file is outside of any include directories.
out_of_include_dirs = True
for incl_path in args.include:
absolute_include_path = os.path.abspath(os.path.join(args.root, incl_path))
if is_child_dir(absolute_include_path, abspath):
out_of_include_dirs = False
break
if out_of_include_dirs:
return True
excl_rules = create_exclude_rules(args)
for i, rule in enumerate(excl_rules):
if rule[0] == abspath:
return rule[1]
if is_child_dir(rule[0], abspath):
# continue to try to longest match.
last_result = rule[1]
for j in range(i + 1, len(excl_rules)):
rule_deep = excl_rules[j]
if not is_child_dir(rule_deep[0], abspath):
break
last_result = rule_deep[1]
return last_result
return False | python | def is_excluded_path(args, filepath):
"""Returns true if the filepath is under the one of the exclude path."""
# Try regular expressions first.
for regexp_exclude_path in args.regexp:
if re.match(regexp_exclude_path, filepath):
return True
abspath = os.path.abspath(filepath)
if args.include:
# If the file is outside of any include directories.
out_of_include_dirs = True
for incl_path in args.include:
absolute_include_path = os.path.abspath(os.path.join(args.root, incl_path))
if is_child_dir(absolute_include_path, abspath):
out_of_include_dirs = False
break
if out_of_include_dirs:
return True
excl_rules = create_exclude_rules(args)
for i, rule in enumerate(excl_rules):
if rule[0] == abspath:
return rule[1]
if is_child_dir(rule[0], abspath):
# continue to try to longest match.
last_result = rule[1]
for j in range(i + 1, len(excl_rules)):
rule_deep = excl_rules[j]
if not is_child_dir(rule_deep[0], abspath):
break
last_result = rule_deep[1]
return last_result
return False | [
"def",
"is_excluded_path",
"(",
"args",
",",
"filepath",
")",
":",
"# Try regular expressions first.",
"for",
"regexp_exclude_path",
"in",
"args",
".",
"regexp",
":",
"if",
"re",
".",
"match",
"(",
"regexp_exclude_path",
",",
"filepath",
")",
":",
"return",
"True",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filepath",
")",
"if",
"args",
".",
"include",
":",
"# If the file is outside of any include directories.",
"out_of_include_dirs",
"=",
"True",
"for",
"incl_path",
"in",
"args",
".",
"include",
":",
"absolute_include_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"root",
",",
"incl_path",
")",
")",
"if",
"is_child_dir",
"(",
"absolute_include_path",
",",
"abspath",
")",
":",
"out_of_include_dirs",
"=",
"False",
"break",
"if",
"out_of_include_dirs",
":",
"return",
"True",
"excl_rules",
"=",
"create_exclude_rules",
"(",
"args",
")",
"for",
"i",
",",
"rule",
"in",
"enumerate",
"(",
"excl_rules",
")",
":",
"if",
"rule",
"[",
"0",
"]",
"==",
"abspath",
":",
"return",
"rule",
"[",
"1",
"]",
"if",
"is_child_dir",
"(",
"rule",
"[",
"0",
"]",
",",
"abspath",
")",
":",
"# continue to try to longest match.",
"last_result",
"=",
"rule",
"[",
"1",
"]",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"excl_rules",
")",
")",
":",
"rule_deep",
"=",
"excl_rules",
"[",
"j",
"]",
"if",
"not",
"is_child_dir",
"(",
"rule_deep",
"[",
"0",
"]",
",",
"abspath",
")",
":",
"break",
"last_result",
"=",
"rule_deep",
"[",
"1",
"]",
"return",
"last_result",
"return",
"False"
] | Returns true if the filepath is under one of the exclude paths. | [
"Returns",
"true",
"if",
"the",
"filepath",
"is",
"under",
"the",
"one",
"of",
"the",
"exclude",
"path",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L135-L166 | train | 237,336 |
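A hedged sketch of the longest-match behaviour; note that create_exclude_rules caches its result globally, so one process should stick to a single argument set:

import argparse
from cpp_coveralls.coverage import is_excluded_path  # assumes the package is installed

# hypothetical arguments: exclude 'src' as a whole, re-include 'src/lib'
args = argparse.Namespace(root=".", regexp=[],
                          exclude=["src"], include=["src/lib"])
is_excluded_path(args, "src/main.cpp")     # True: outside every include dir
is_excluded_path(args, "src/lib/foo.cpp")  # False: the deeper include rule wins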
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | filter_dirs | def filter_dirs(root, dirs, excl_paths):
"""Filter directory paths based on the exclusion rules defined in
'excl_paths'.
"""
filtered_dirs = []
for dirpath in dirs:
abspath = os.path.abspath(os.path.join(root, dirpath))
if os.path.basename(abspath) in _SKIP_DIRS:
continue
if abspath not in excl_paths:
filtered_dirs.append(dirpath)
return filtered_dirs | python | def filter_dirs(root, dirs, excl_paths):
"""Filter directory paths based on the exclusion rules defined in
'excl_paths'.
"""
filtered_dirs = []
for dirpath in dirs:
abspath = os.path.abspath(os.path.join(root, dirpath))
if os.path.basename(abspath) in _SKIP_DIRS:
continue
if abspath not in excl_paths:
filtered_dirs.append(dirpath)
return filtered_dirs | [
"def",
"filter_dirs",
"(",
"root",
",",
"dirs",
",",
"excl_paths",
")",
":",
"filtered_dirs",
"=",
"[",
"]",
"for",
"dirpath",
"in",
"dirs",
":",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dirpath",
")",
")",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"abspath",
")",
"in",
"_SKIP_DIRS",
":",
"continue",
"if",
"abspath",
"not",
"in",
"excl_paths",
":",
"filtered_dirs",
".",
"append",
"(",
"dirpath",
")",
"return",
"filtered_dirs"
] | Filter directory paths based on the exclusion rules defined in
'excl_paths'. | [
"Filter",
"directory",
"paths",
"based",
"on",
"the",
"exclusion",
"rules",
"defined",
"in",
"excl_paths",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L186-L197 | train | 237,337 |
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | parse_gcov_file | def parse_gcov_file(args, fobj, filename):
"""Parses the content of .gcov file
"""
coverage = []
ignoring = False
for line in fobj:
report_fields = line.decode('utf-8', 'replace').split(':', 2)
if len(report_fields) == 1:
continue
line_num = report_fields[1].strip()
if line_num == '':
continue
cov_num = report_fields[0].strip()
line_num = int(line_num)
text = report_fields[2]
if line_num == 0:
continue
if re.search(r'\bLCOV_EXCL_START\b', text):
if ignoring:
sys.stderr.write("Warning: %s:%d: nested LCOV_EXCL_START, "
"please fix\n" % (filename, line_num))
ignoring = True
elif re.search(r'\bLCOV_EXCL_(STOP|END)\b', text):
if not ignoring:
sys.stderr.write("Warning: %s:%d: LCOV_EXCL_STOP outside of "
"exclusion zone, please fix\n" % (filename,
line_num))
if 'LCOV_EXCL_END' in text:
sys.stderr.write("Warning: %s:%d: LCOV_EXCL_STOP is the "
"correct keyword\n" % (filename, line_num))
ignoring = False
if cov_num == '-':
coverage.append(None)
elif cov_num == '#####':
# Avoid false positives.
if (
ignoring or
any([re.search(pattern, text) for pattern in args.exclude_lines_pattern])
):
coverage.append(None)
else:
coverage.append(0)
elif cov_num == '=====':
# This is indicative of a gcov output parse
# error.
coverage.append(0)
else:
coverage.append(int(cov_num.rstrip('*')))
return coverage | python | def parse_gcov_file(args, fobj, filename):
"""Parses the content of .gcov file
"""
coverage = []
ignoring = False
for line in fobj:
report_fields = line.decode('utf-8', 'replace').split(':', 2)
if len(report_fields) == 1:
continue
line_num = report_fields[1].strip()
if line_num == '':
continue
cov_num = report_fields[0].strip()
line_num = int(line_num)
text = report_fields[2]
if line_num == 0:
continue
if re.search(r'\bLCOV_EXCL_START\b', text):
if ignoring:
sys.stderr.write("Warning: %s:%d: nested LCOV_EXCL_START, "
"please fix\n" % (filename, line_num))
ignoring = True
elif re.search(r'\bLCOV_EXCL_(STOP|END)\b', text):
if not ignoring:
sys.stderr.write("Warning: %s:%d: LCOV_EXCL_STOP outside of "
"exclusion zone, please fix\n" % (filename,
line_num))
if 'LCOV_EXCL_END' in text:
sys.stderr.write("Warning: %s:%d: LCOV_EXCL_STOP is the "
"correct keyword\n" % (filename, line_num))
ignoring = False
if cov_num == '-':
coverage.append(None)
elif cov_num == '#####':
# Avoid false positives.
if (
ignoring or
any([re.search(pattern, text) for pattern in args.exclude_lines_pattern])
):
coverage.append(None)
else:
coverage.append(0)
elif cov_num == '=====':
# This is indicative of a gcov output parse
# error.
coverage.append(0)
else:
coverage.append(int(cov_num.rstrip('*')))
return coverage | [
"def",
"parse_gcov_file",
"(",
"args",
",",
"fobj",
",",
"filename",
")",
":",
"coverage",
"=",
"[",
"]",
"ignoring",
"=",
"False",
"for",
"line",
"in",
"fobj",
":",
"report_fields",
"=",
"line",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
".",
"split",
"(",
"':'",
",",
"2",
")",
"if",
"len",
"(",
"report_fields",
")",
"==",
"1",
":",
"continue",
"line_num",
"=",
"report_fields",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"if",
"line_num",
"==",
"''",
":",
"continue",
"cov_num",
"=",
"report_fields",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"line_num",
"=",
"int",
"(",
"line_num",
")",
"text",
"=",
"report_fields",
"[",
"2",
"]",
"if",
"line_num",
"==",
"0",
":",
"continue",
"if",
"re",
".",
"search",
"(",
"r'\\bLCOV_EXCL_START\\b'",
",",
"text",
")",
":",
"if",
"ignoring",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Warning: %s:%d: nested LCOV_EXCL_START, \"",
"\"please fix\\n\"",
"%",
"(",
"filename",
",",
"line_num",
")",
")",
"ignoring",
"=",
"True",
"elif",
"re",
".",
"search",
"(",
"r'\\bLCOV_EXCL_(STOP|END)\\b'",
",",
"text",
")",
":",
"if",
"not",
"ignoring",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Warning: %s:%d: LCOV_EXCL_STOP outside of \"",
"\"exclusion zone, please fix\\n\"",
"%",
"(",
"filename",
",",
"line_num",
")",
")",
"if",
"'LCOV_EXCL_END'",
"in",
"text",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Warning: %s:%d: LCOV_EXCL_STOP is the \"",
"\"correct keyword\\n\"",
"%",
"(",
"filename",
",",
"line_num",
")",
")",
"ignoring",
"=",
"False",
"if",
"cov_num",
"==",
"'-'",
":",
"coverage",
".",
"append",
"(",
"None",
")",
"elif",
"cov_num",
"==",
"'#####'",
":",
"# Avoid false positives.",
"if",
"(",
"ignoring",
"or",
"any",
"(",
"[",
"re",
".",
"search",
"(",
"pattern",
",",
"text",
")",
"for",
"pattern",
"in",
"args",
".",
"exclude_lines_pattern",
"]",
")",
")",
":",
"coverage",
".",
"append",
"(",
"None",
")",
"else",
":",
"coverage",
".",
"append",
"(",
"0",
")",
"elif",
"cov_num",
"==",
"'====='",
":",
"# This is indicitive of a gcov output parse",
"# error.",
"coverage",
".",
"append",
"(",
"0",
")",
"else",
":",
"coverage",
".",
"append",
"(",
"int",
"(",
"cov_num",
".",
"rstrip",
"(",
"'*'",
")",
")",
")",
"return",
"coverage"
] | Parses the content of .gcov file | [
"Parses",
"the",
"content",
"of",
".",
"gcov",
"file"
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L247-L296 | train | 237,338 |
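A runnable sketch that feeds a synthetic .gcov fragment (gcov's count:line:text layout) through the parser, assuming it is importable from cpp_coveralls.coverage:

import argparse
import io
from cpp_coveralls.coverage import parse_gcov_file  # assumes the package is installed

gcov = io.BytesIO(
    b"        -:    0:Source:demo.c\n"
    b"        1:    1:int main() {\n"
    b"    #####:    2:  return 1;\n"
    b"        -:    3:}\n")
args = argparse.Namespace(exclude_lines_pattern=[])
parse_gcov_file(args, gcov, "demo.c.gcov")
# -> [1, 0, None]: the line-0 header is skipped and '-' (non-executable) is None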
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | parse_lcov_file_info | def parse_lcov_file_info(args, filepath, line_iter, line_coverage_re, file_end_string):
""" Parse the file content in lcov info file
"""
coverage = []
lines_covered = []
for line in line_iter:
if line != "end_of_record":
line_coverage_match = line_coverage_re.match(line)
if line_coverage_match:
line_no = line_coverage_match.group(1)
cov_count = int(line_coverage_match.group(2))
if args.max_cov_count:
if cov_count > args.max_cov_count:
cov_count = args.max_cov_count + 1
lines_covered.append((line_no, cov_count))
else:
break
num_code_lines = len([line.rstrip('\n') for line in open(filepath, 'r')])
coverage = [None] * num_code_lines
for line_covered in lines_covered:
coverage[int(line_covered[0]) - 1] = line_covered[1]
return coverage | python | def parse_lcov_file_info(args, filepath, line_iter, line_coverage_re, file_end_string):
""" Parse the file content in lcov info file
"""
coverage = []
lines_covered = []
for line in line_iter:
if line != "end_of_record":
line_coverage_match = line_coverage_re.match(line)
if line_coverage_match:
line_no = line_coverage_match.group(1)
cov_count = int(line_coverage_match.group(2))
if args.max_cov_count:
if cov_count > args.max_cov_count:
cov_count = args.max_cov_count + 1
lines_covered.append((line_no, cov_count))
else:
break
num_code_lines = len([line.rstrip('\n') for line in open(filepath, 'r')])
coverage = [None] * num_code_lines
for line_covered in lines_covered:
coverage[int(line_covered[0]) - 1] = line_covered[1]
return coverage | [
"def",
"parse_lcov_file_info",
"(",
"args",
",",
"filepath",
",",
"line_iter",
",",
"line_coverage_re",
",",
"file_end_string",
")",
":",
"coverage",
"=",
"[",
"]",
"lines_covered",
"=",
"[",
"]",
"for",
"line",
"in",
"line_iter",
":",
"if",
"line",
"!=",
"\"end_of_record\"",
":",
"line_coverage_match",
"=",
"line_coverage_re",
".",
"match",
"(",
"line",
")",
"if",
"line_coverage_match",
":",
"line_no",
"=",
"line_coverage_match",
".",
"group",
"(",
"1",
")",
"cov_count",
"=",
"int",
"(",
"line_coverage_match",
".",
"group",
"(",
"2",
")",
")",
"if",
"args",
".",
"max_cov_count",
":",
"if",
"cov_count",
">",
"args",
".",
"max_cov_count",
":",
"cov_count",
"=",
"args",
".",
"max_cov_count",
"+",
"1",
"lines_covered",
".",
"append",
"(",
"(",
"line_no",
",",
"cov_count",
")",
")",
"else",
":",
"break",
"num_code_lines",
"=",
"len",
"(",
"[",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"open",
"(",
"filepath",
",",
"'r'",
")",
"]",
")",
"coverage",
"=",
"[",
"None",
"]",
"*",
"num_code_lines",
"for",
"line_covered",
"in",
"lines_covered",
":",
"coverage",
"[",
"int",
"(",
"line_covered",
"[",
"0",
"]",
")",
"-",
"1",
"]",
"=",
"line_covered",
"[",
"1",
"]",
"return",
"coverage"
] | Parse the file content in an lcov info file | [
"Parse",
"the",
"file",
"content",
"in",
"lcov",
"info",
"file"
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L299-L322 | train | 237,339 |
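
A hedged sketch of driving parse_lcov_file_info. The DA:<line>,<hits> format follows the lcov tracefile convention and the argparse namespace is an assumption about the caller, neither of which is shown in the record; the call is commented out because the function re-reads filepath from disk to size the coverage list:

import re
from argparse import Namespace

line_cov_re = re.compile(r'^DA:(\d+),(\d+)')   # assumed lcov 'DA:' records
lines = iter(['DA:1,3', 'DA:2,0', 'end_of_record'])
args = Namespace(max_cov_count=None)
# coverage = parse_lcov_file_info(args, 'src/foo.c', lines,
#                                 line_cov_re, 'end_of_record')
# -> [3, 0, None, ...] padded to the line count of src/foo.c
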
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | combine_reports | def combine_reports(original, new):
"""Combines two gcov reports for a file into one by adding the number of hits on each line
"""
if original is None:
return new
report = {}
report['name'] = original['name']
report['source_digest'] = original['source_digest']
coverage = []
for original_num, new_num in zip(original['coverage'], new['coverage']):
if original_num is None:
coverage.append(new_num)
elif new_num is None:
coverage.append(original_num)
else:
coverage.append(original_num + new_num)
report['coverage'] = coverage
return report | python | def combine_reports(original, new):
"""Combines two gcov reports for a file into one by adding the number of hits on each line
"""
if original is None:
return new
report = {}
report['name'] = original['name']
report['source_digest'] = original['source_digest']
coverage = []
for original_num, new_num in zip(original['coverage'], new['coverage']):
if original_num is None:
coverage.append(new_num)
elif new_num is None:
coverage.append(original_num)
else:
coverage.append(original_num + new_num)
report['coverage'] = coverage
return report | [
"def",
"combine_reports",
"(",
"original",
",",
"new",
")",
":",
"if",
"original",
"is",
"None",
":",
"return",
"new",
"report",
"=",
"{",
"}",
"report",
"[",
"'name'",
"]",
"=",
"original",
"[",
"'name'",
"]",
"report",
"[",
"'source_digest'",
"]",
"=",
"original",
"[",
"'source_digest'",
"]",
"coverage",
"=",
"[",
"]",
"for",
"original_num",
",",
"new_num",
"in",
"zip",
"(",
"original",
"[",
"'coverage'",
"]",
",",
"new",
"[",
"'coverage'",
"]",
")",
":",
"if",
"original_num",
"is",
"None",
":",
"coverage",
".",
"append",
"(",
"new_num",
")",
"elif",
"new_num",
"is",
"None",
":",
"coverage",
".",
"append",
"(",
"original_num",
")",
"else",
":",
"coverage",
".",
"append",
"(",
"original_num",
"+",
"new_num",
")",
"report",
"[",
"'coverage'",
"]",
"=",
"coverage",
"return",
"report"
] | Combines two gcov reports for a file into one by adding the number of hits on each line | [
"Combines",
"two",
"gcov",
"reports",
"for",
"a",
"file",
"into",
"one",
"by",
"adding",
"the",
"number",
"of",
"hits",
"on",
"each",
"line"
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L324-L342 | train | 237,340 |
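
A worked example of the merge rule above: None (a non-code line) survives only when both runs agree, otherwise the hit counts are carried over or summed. The file name and digest are placeholders:

run1 = {'name': 'foo.c', 'source_digest': 'abc', 'coverage': [None, 1, 0]}
run2 = {'name': 'foo.c', 'source_digest': 'abc', 'coverage': [None, 2, None]}
merged = combine_reports(run1, run2)
# merged['coverage'] == [None, 3, 0]
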
eddyxu/cpp-coveralls | cpp_coveralls/coverage.py | collect_non_report_files | def collect_non_report_files(args, discovered_files):
"""Collects the source files that have no coverage reports.
"""
excl_paths = exclude_paths(args)
abs_root = os.path.abspath(args.root)
non_report_files = []
for root, dirs, files in os.walk(args.root, followlinks=args.follow_symlinks):
dirs[:] = filter_dirs(root, dirs, excl_paths)
for filename in files:
if not is_source_file(args, filename):
continue
abs_filepath = os.path.join(os.path.abspath(root), filename)
if is_excluded_path(args, abs_filepath):
continue
filepath = os.path.relpath(abs_filepath, abs_root)
if filepath not in discovered_files:
src_report = {}
src_report['name'] = posix_path(filepath)
coverage = []
with io.open(abs_filepath, mode='rb') as fobj:
for _ in fobj:
coverage.append(None)
fobj.seek(0)
src_report['source_digest'] = hashlib.md5(fobj.read()).hexdigest()
src_report['coverage'] = coverage
non_report_files.append(src_report)
return non_report_files | python | def collect_non_report_files(args, discovered_files):
"""Collects the source files that have no coverage reports.
"""
excl_paths = exclude_paths(args)
abs_root = os.path.abspath(args.root)
non_report_files = []
for root, dirs, files in os.walk(args.root, followlinks=args.follow_symlinks):
dirs[:] = filter_dirs(root, dirs, excl_paths)
for filename in files:
if not is_source_file(args, filename):
continue
abs_filepath = os.path.join(os.path.abspath(root), filename)
if is_excluded_path(args, abs_filepath):
continue
filepath = os.path.relpath(abs_filepath, abs_root)
if filepath not in discovered_files:
src_report = {}
src_report['name'] = posix_path(filepath)
coverage = []
with io.open(abs_filepath, mode='rb') as fobj:
for _ in fobj:
coverage.append(None)
fobj.seek(0)
src_report['source_digest'] = hashlib.md5(fobj.read()).hexdigest()
src_report['coverage'] = coverage
non_report_files.append(src_report)
return non_report_files | [
"def",
"collect_non_report_files",
"(",
"args",
",",
"discovered_files",
")",
":",
"excl_paths",
"=",
"exclude_paths",
"(",
"args",
")",
"abs_root",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"root",
")",
"non_report_files",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"args",
".",
"root",
",",
"followlinks",
"=",
"args",
".",
"follow_symlinks",
")",
":",
"dirs",
"[",
":",
"]",
"=",
"filter_dirs",
"(",
"root",
",",
"dirs",
",",
"excl_paths",
")",
"for",
"filename",
"in",
"files",
":",
"if",
"not",
"is_source_file",
"(",
"args",
",",
"filename",
")",
":",
"continue",
"abs_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"root",
")",
",",
"filename",
")",
"if",
"is_excluded_path",
"(",
"args",
",",
"abs_filepath",
")",
":",
"continue",
"filepath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"abs_filepath",
",",
"abs_root",
")",
"if",
"filepath",
"not",
"in",
"discovered_files",
":",
"src_report",
"=",
"{",
"}",
"src_report",
"[",
"'name'",
"]",
"=",
"posix_path",
"(",
"filepath",
")",
"coverage",
"=",
"[",
"]",
"with",
"io",
".",
"open",
"(",
"abs_filepath",
",",
"mode",
"=",
"'rb'",
")",
"as",
"fobj",
":",
"for",
"_",
"in",
"fobj",
":",
"coverage",
".",
"append",
"(",
"None",
")",
"fobj",
".",
"seek",
"(",
"0",
")",
"src_report",
"[",
"'source_digest'",
"]",
"=",
"hashlib",
".",
"md5",
"(",
"fobj",
".",
"read",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"src_report",
"[",
"'coverage'",
"]",
"=",
"coverage",
"non_report_files",
".",
"append",
"(",
"src_report",
")",
"return",
"non_report_files"
] | Collects the source files that have no coverage reports. | [
"Collects",
"the",
"source",
"files",
"that",
"have",
"no",
"coverage",
"reports",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/coverage.py#L344-L371 | train | 237,341 |
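
The source_digest that coveralls.io later compares is an MD5 over the file's raw bytes, exactly as computed above. The same computation on an in-memory stand-in:

import hashlib

source = b'int main(void) { return 0; }\n'   # stand-in for the file contents
digest = hashlib.md5(source).hexdigest()     # value stored in src_report['source_digest']
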
eddyxu/cpp-coveralls | cpp_coveralls/__init__.py | parse_yaml_config | def parse_yaml_config(args):
"""Parse yaml config"""
try:
import yaml
except ImportError:
yaml = None
yml = {}
try:
with open(args.coveralls_yaml, 'r') as fp:
if not yaml:
raise SystemExit('PyYAML is required for parsing configuration')
yml = yaml.load(fp)
except IOError:
pass
yml = yml or {}
return yml | python | def parse_yaml_config(args):
"""Parse yaml config"""
try:
import yaml
except ImportError:
yaml = None
yml = {}
try:
with open(args.coveralls_yaml, 'r') as fp:
if not yaml:
raise SystemExit('PyYAML is required for parsing configuration')
yml = yaml.load(fp)
except IOError:
pass
yml = yml or {}
return yml | [
"def",
"parse_yaml_config",
"(",
"args",
")",
":",
"try",
":",
"import",
"yaml",
"except",
"ImportError",
":",
"yaml",
"=",
"None",
"yml",
"=",
"{",
"}",
"try",
":",
"with",
"open",
"(",
"args",
".",
"coveralls_yaml",
",",
"'r'",
")",
"as",
"fp",
":",
"if",
"not",
"yaml",
":",
"raise",
"SystemExit",
"(",
"'PyYAML is required for parsing configuration'",
")",
"yml",
"=",
"yaml",
".",
"load",
"(",
"fp",
")",
"except",
"IOError",
":",
"pass",
"yml",
"=",
"yml",
"or",
"{",
"}",
"return",
"yml"
] | Parse yaml config | [
"Parse",
"yaml",
"config"
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/__init__.py#L37-L53 | train | 237,342 |
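
The keys this loader is expected to supply can be read off the run() record that follows. A hypothetical .coveralls.yml, shown as the dict parse_yaml_config would return (all values illustrative):

yml = {
    'repo_token': 'abc123',               # secret token for coveralls.io
    'service_name': 'travis-ci',
    'root': '.',
    'build_root': 'build',
    'exclude': ['tests', 'third_party'],
    'gcov_options': r'\-lp',
}
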
eddyxu/cpp-coveralls | cpp_coveralls/__init__.py | run | def run():
"""Run cpp coverage."""
import json
import os
import sys
from . import coverage, report
args = coverage.create_args(sys.argv[1:])
if args.verbose:
print('encodings: {}'.format(args.encodings))
yml = parse_yaml_config(args)
if not args.repo_token:
# try to get token from yaml first
args.repo_token = yml.get('repo_token', '')
if not args.repo_token:
# use environment COVERALLS_REPO_TOKEN as a fallback
args.repo_token = os.environ.get('COVERALLS_REPO_TOKEN')
args.service_name = yml.get('service_name', 'travis-ci')
if not args.gcov_options:
args.gcov_options = yml.get('gcov_options', '')
if not args.root:
args.root = yml.get('root', '.')
if not args.build_root:
args.build_root = yml.get('build_root', '')
args.exclude.extend(yml.get('exclude', []))
args.include.extend(yml.get('include', []))
args.exclude_lines_pattern.extend(yml.get('exclude_lines_pattern', []))
args.service_job_id = os.environ.get('TRAVIS_JOB_ID', '')
if args.repo_token == '' and args.service_job_id == '':
raise ValueError("\nno coveralls.io token specified and no travis job id found\n"
"see --help for examples on how to specify a token\n")
if not args.no_gcov:
coverage.run_gcov(args)
cov_report = coverage.collect(args)
if args.verbose:
print(cov_report)
if args.dryrun:
return 0
if args.dump:
args.dump.write(json.dumps(cov_report))
return 0
return report.post_report(cov_report, args) | python | def run():
"""Run cpp coverage."""
import json
import os
import sys
from . import coverage, report
args = coverage.create_args(sys.argv[1:])
if args.verbose:
print('encodings: {}'.format(args.encodings))
yml = parse_yaml_config(args)
if not args.repo_token:
# try to get token from yaml first
args.repo_token = yml.get('repo_token', '')
if not args.repo_token:
# use environment COVERALLS_REPO_TOKEN as a fallback
args.repo_token = os.environ.get('COVERALLS_REPO_TOKEN')
args.service_name = yml.get('service_name', 'travis-ci')
if not args.gcov_options:
args.gcov_options = yml.get('gcov_options', '')
if not args.root:
args.root = yml.get('root', '.')
if not args.build_root:
args.build_root = yml.get('build_root', '')
args.exclude.extend(yml.get('exclude', []))
args.include.extend(yml.get('include', []))
args.exclude_lines_pattern.extend(yml.get('exclude_lines_pattern', []))
args.service_job_id = os.environ.get('TRAVIS_JOB_ID', '')
if args.repo_token == '' and args.service_job_id == '':
raise ValueError("\nno coveralls.io token specified and no travis job id found\n"
"see --help for examples on how to specify a token\n")
if not args.no_gcov:
coverage.run_gcov(args)
cov_report = coverage.collect(args)
if args.verbose:
print(cov_report)
if args.dryrun:
return 0
if args.dump:
args.dump.write(json.dumps(cov_report))
return 0
return report.post_report(cov_report, args) | [
"def",
"run",
"(",
")",
":",
"import",
"json",
"import",
"os",
"import",
"sys",
"from",
".",
"import",
"coverage",
",",
"report",
"args",
"=",
"coverage",
".",
"create_args",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"'encodings: {}'",
".",
"format",
"(",
"args",
".",
"encodings",
")",
")",
"yml",
"=",
"parse_yaml_config",
"(",
"args",
")",
"if",
"not",
"args",
".",
"repo_token",
":",
"# try get token from yaml first",
"args",
".",
"repo_token",
"=",
"yml",
".",
"get",
"(",
"'repo_token'",
",",
"''",
")",
"if",
"not",
"args",
".",
"repo_token",
":",
"# use environment COVERALLS_REPO_TOKEN as a fallback",
"args",
".",
"repo_token",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'COVERALLS_REPO_TOKEN'",
")",
"args",
".",
"service_name",
"=",
"yml",
".",
"get",
"(",
"'service_name'",
",",
"'travis-ci'",
")",
"if",
"not",
"args",
".",
"gcov_options",
":",
"args",
".",
"gcov_options",
"=",
"yml",
".",
"get",
"(",
"'gcov_options'",
",",
"''",
")",
"if",
"not",
"args",
".",
"root",
":",
"args",
".",
"root",
"=",
"yml",
".",
"get",
"(",
"'root'",
",",
"'.'",
")",
"if",
"not",
"args",
".",
"build_root",
":",
"args",
".",
"build_root",
"=",
"yml",
".",
"get",
"(",
"'build_root'",
",",
"''",
")",
"args",
".",
"exclude",
".",
"extend",
"(",
"yml",
".",
"get",
"(",
"'exclude'",
",",
"[",
"]",
")",
")",
"args",
".",
"include",
".",
"extend",
"(",
"yml",
".",
"get",
"(",
"'include'",
",",
"[",
"]",
")",
")",
"args",
".",
"exclude_lines_pattern",
".",
"extend",
"(",
"yml",
".",
"get",
"(",
"'exclude_lines_pattern'",
",",
"[",
"]",
")",
")",
"args",
".",
"service_job_id",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'TRAVIS_JOB_ID'",
",",
"''",
")",
"if",
"args",
".",
"repo_token",
"==",
"''",
"and",
"args",
".",
"service_job_id",
"==",
"''",
":",
"raise",
"ValueError",
"(",
"\"\\nno coveralls.io token specified and no travis job id found\\n\"",
"\"see --help for examples on how to specify a token\\n\"",
")",
"if",
"not",
"args",
".",
"no_gcov",
":",
"coverage",
".",
"run_gcov",
"(",
"args",
")",
"cov_report",
"=",
"coverage",
".",
"collect",
"(",
"args",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"cov_report",
")",
"if",
"args",
".",
"dryrun",
":",
"return",
"0",
"if",
"args",
".",
"dump",
":",
"args",
".",
"dump",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"cov_report",
")",
")",
"return",
"0",
"return",
"report",
".",
"post_report",
"(",
"cov_report",
",",
"args",
")"
] | Run cpp coverage. | [
"Run",
"cpp",
"coverage",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/__init__.py#L55-L106 | train | 237,343 |
eddyxu/cpp-coveralls | cpp_coveralls/gitrepo.py | gitrepo | def gitrepo(cwd):
"""Return hash of Git data that can be used to display more information to
users.
Example:
"git": {
"head": {
"id": "5e837ce92220be64821128a70f6093f836dd2c05",
"author_name": "Wil Gieseler",
"author_email": "wil@example.com",
"committer_name": "Wil Gieseler",
"committer_email": "wil@example.com",
"message": "depend on simplecov >= 0.7"
},
"branch": "master",
"remotes": [{
"name": "origin",
"url": "https://github.com/lemurheavy/coveralls-ruby.git"
}]
}
From https://github.com/coagulant/coveralls-python (with MIT license).
"""
repo = Repository(cwd)
if not repo.valid():
return {}
return {
'head': {
'id': repo.gitlog('%H'),
'author_name': repo.gitlog('%aN'),
'author_email': repo.gitlog('%ae'),
'committer_name': repo.gitlog('%cN'),
'committer_email': repo.gitlog('%ce'),
'message': repo.gitlog('%s')
},
'branch': os.environ.get('TRAVIS_BRANCH',
os.environ.get('APPVEYOR_REPO_BRANCH',
repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip())),
'remotes': [{'name': line.split()[0], 'url': line.split()[1]}
for line in repo.git('remote', '-v')[1] if '(fetch)' in line]
} | python | def gitrepo(cwd):
"""Return hash of Git data that can be used to display more information to
users.
Example:
"git": {
"head": {
"id": "5e837ce92220be64821128a70f6093f836dd2c05",
"author_name": "Wil Gieseler",
"author_email": "wil@example.com",
"committer_name": "Wil Gieseler",
"committer_email": "wil@example.com",
"message": "depend on simplecov >= 0.7"
},
"branch": "master",
"remotes": [{
"name": "origin",
"url": "https://github.com/lemurheavy/coveralls-ruby.git"
}]
}
From https://github.com/coagulant/coveralls-python (with MIT license).
"""
repo = Repository(cwd)
if not repo.valid():
return {}
return {
'head': {
'id': repo.gitlog('%H'),
'author_name': repo.gitlog('%aN'),
'author_email': repo.gitlog('%ae'),
'committer_name': repo.gitlog('%cN'),
'committer_email': repo.gitlog('%ce'),
'message': repo.gitlog('%s')
},
'branch': os.environ.get('TRAVIS_BRANCH',
os.environ.get('APPVEYOR_REPO_BRANCH',
repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip())),
'remotes': [{'name': line.split()[0], 'url': line.split()[1]}
for line in repo.git('remote', '-v')[1] if '(fetch)' in line]
} | [
"def",
"gitrepo",
"(",
"cwd",
")",
":",
"repo",
"=",
"Repository",
"(",
"cwd",
")",
"if",
"not",
"repo",
".",
"valid",
"(",
")",
":",
"return",
"{",
"}",
"return",
"{",
"'head'",
":",
"{",
"'id'",
":",
"repo",
".",
"gitlog",
"(",
"'%H'",
")",
",",
"'author_name'",
":",
"repo",
".",
"gitlog",
"(",
"'%aN'",
")",
",",
"'author_email'",
":",
"repo",
".",
"gitlog",
"(",
"'%ae'",
")",
",",
"'committer_name'",
":",
"repo",
".",
"gitlog",
"(",
"'%cN'",
")",
",",
"'committer_email'",
":",
"repo",
".",
"gitlog",
"(",
"'%ce'",
")",
",",
"'message'",
":",
"repo",
".",
"gitlog",
"(",
"'%s'",
")",
"}",
",",
"'branch'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TRAVIS_BRANCH'",
",",
"os",
".",
"environ",
".",
"get",
"(",
"'APPVEYOR_REPO_BRANCH'",
",",
"repo",
".",
"git",
"(",
"'rev-parse'",
",",
"'--abbrev-ref'",
",",
"'HEAD'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
")",
",",
"'remotes'",
":",
"[",
"{",
"'name'",
":",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
",",
"'url'",
":",
"line",
".",
"split",
"(",
")",
"[",
"1",
"]",
"}",
"for",
"line",
"in",
"repo",
".",
"git",
"(",
"'remote'",
",",
"'-v'",
")",
"[",
"1",
"]",
"if",
"'(fetch)'",
"in",
"line",
"]",
"}"
] | Return hash of Git data that can be used to display more information to
users.
Example:
"git": {
"head": {
"id": "5e837ce92220be64821128a70f6093f836dd2c05",
"author_name": "Wil Gieseler",
"author_email": "wil@example.com",
"committer_name": "Wil Gieseler",
"committer_email": "wil@example.com",
"message": "depend on simplecov >= 0.7"
},
"branch": "master",
"remotes": [{
"name": "origin",
"url": "https://github.com/lemurheavy/coveralls-ruby.git"
}]
}
From https://github.com/coagulant/coveralls-python (with MIT license). | [
"Return",
"hash",
"of",
"Git",
"data",
"that",
"can",
"be",
"used",
"to",
"display",
"more",
"information",
"to",
"users",
"."
] | ff7af7eea2a23828f6ab2541667ea04f94344dce | https://github.com/eddyxu/cpp-coveralls/blob/ff7af7eea2a23828f6ab2541667ea04f94344dce/cpp_coveralls/gitrepo.py#L7-L49 | train | 237,344 |
vitiral/gpio | gpio.py | _verify | def _verify(function):
"""decorator to ensure pin is properly set up"""
# @functools.wraps
def wrapped(pin, *args, **kwargs):
pin = int(pin)
if pin not in _open:
ppath = gpiopath(pin)
if not os.path.exists(ppath):
log.debug("Creating Pin {0}".format(pin))
with _export_lock:
with open(pjoin(gpio_root, 'export'), 'w') as f:
_write(f, pin)
value = open(pjoin(ppath, 'value'), FMODE)
direction = open(pjoin(ppath, 'direction'), FMODE)
_open[pin] = PinState(value=value, direction=direction)
return function(pin, *args, **kwargs)
return wrapped | python | def _verify(function):
"""decorator to ensure pin is properly set up"""
# @functools.wraps
def wrapped(pin, *args, **kwargs):
pin = int(pin)
if pin not in _open:
ppath = gpiopath(pin)
if not os.path.exists(ppath):
log.debug("Creating Pin {0}".format(pin))
with _export_lock:
with open(pjoin(gpio_root, 'export'), 'w') as f:
_write(f, pin)
value = open(pjoin(ppath, 'value'), FMODE)
direction = open(pjoin(ppath, 'direction'), FMODE)
_open[pin] = PinState(value=value, direction=direction)
return function(pin, *args, **kwargs)
return wrapped | [
"def",
"_verify",
"(",
"function",
")",
":",
"# @functools.wraps\r",
"def",
"wrapped",
"(",
"pin",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"pin",
"=",
"int",
"(",
"pin",
")",
"if",
"pin",
"not",
"in",
"_open",
":",
"ppath",
"=",
"gpiopath",
"(",
"pin",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"ppath",
")",
":",
"log",
".",
"debug",
"(",
"\"Creating Pin {0}\"",
".",
"format",
"(",
"pin",
")",
")",
"with",
"_export_lock",
":",
"with",
"open",
"(",
"pjoin",
"(",
"gpio_root",
",",
"'export'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"_write",
"(",
"f",
",",
"pin",
")",
"value",
"=",
"open",
"(",
"pjoin",
"(",
"ppath",
",",
"'value'",
")",
",",
"FMODE",
")",
"direction",
"=",
"open",
"(",
"pjoin",
"(",
"ppath",
",",
"'direction'",
")",
",",
"FMODE",
")",
"_open",
"[",
"pin",
"]",
"=",
"PinState",
"(",
"value",
"=",
"value",
",",
"direction",
"=",
"direction",
")",
"return",
"function",
"(",
"pin",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | decorator to ensure pin is properly set up | [
"decorator",
"to",
"ensure",
"pin",
"is",
"properly",
"set",
"up"
] | d4d8bdc6965295b978eca882e2e2e5a1b35e047b | https://github.com/vitiral/gpio/blob/d4d8bdc6965295b978eca882e2e2e5a1b35e047b/gpio.py#L54-L70 | train | 237,345 |
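
The decorator lazily exports the pin through the sysfs GPIO interface and caches open 'value'/'direction' handles in _open, so wrapped functions can assume both files exist. A hypothetical reader in the same style (read() here is a sketch, not necessarily the module's real function):

@_verify
def read(pin):
    '''read the current pin value (sketch)'''
    f = _open[pin].value
    f.seek(0)                  # handles stay open, so rewind before reading
    return int(f.read())
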
vitiral/gpio | gpio.py | set | def set(pin, value):
'''set the pin value to 0 or 1'''
if value is LOW:
value = 0
value = int(bool(value))
log.debug("Write {0}: {1}".format(pin, value))
f = _open[pin].value
_write(f, value) | python | def set(pin, value):
'''set the pin value to 0 or 1'''
if value is LOW:
value = 0
value = int(bool(value))
log.debug("Write {0}: {1}".format(pin, value))
f = _open[pin].value
_write(f, value) | [
"def",
"set",
"(",
"pin",
",",
"value",
")",
":",
"if",
"value",
"is",
"LOW",
":",
"value",
"=",
"0",
"value",
"=",
"int",
"(",
"bool",
"(",
"value",
")",
")",
"log",
".",
"debug",
"(",
"\"Write {0}: {1}\"",
".",
"format",
"(",
"pin",
",",
"value",
")",
")",
"f",
"=",
"_open",
"[",
"pin",
"]",
".",
"value",
"_write",
"(",
"f",
",",
"value",
")"
] | set the pin value to 0 or 1 | [
"set",
"the",
"pin",
"value",
"to",
"0",
"or",
"1"
] | d4d8bdc6965295b978eca882e2e2e5a1b35e047b | https://github.com/vitiral/gpio/blob/d4d8bdc6965295b978eca882e2e2e5a1b35e047b/gpio.py#L158-L165 | train | 237,346 |
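
Illustrative use of set(); setup() and the OUT constant are assumed to exist elsewhere in the module (only LOW is visible in this record):

import gpio

gpio.setup(23, gpio.OUT)   # pin number is arbitrary
gpio.set(23, 1)            # drive high
gpio.set(23, gpio.LOW)     # drive low
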
fhs/pyhdf | pyhdf/V.py | V.end | def end(self):
"""Close the V interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
# Note also that the same C function is used to end
# the VS interface
_checkErr('vend', _C.Vfinish(self._hdf_inst._id),
"cannot terminate V interface")
self._hdf_inst = None | python | def end(self):
"""Close the V interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
# Note also that the same C function is used to end
# the VS interface
_checkErr('vend', _C.Vfinish(self._hdf_inst._id),
"cannot terminate V interface")
self._hdf_inst = None | [
"def",
"end",
"(",
"self",
")",
":",
"# Note: Vend is just a macro; use 'Vfinish' instead",
"# Note also the the same C function is used to end",
"# the VS interface",
"_checkErr",
"(",
"'vend'",
",",
"_C",
".",
"Vfinish",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
")",
",",
"\"cannot terminate V interface\"",
")",
"self",
".",
"_hdf_inst",
"=",
"None"
] | Close the V interface.
Args::
No argument
Returns::
None
C library equivalent : Vend | [
"Close",
"the",
"V",
"interface",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L704-L723 | train | 237,347 |
fhs/pyhdf | pyhdf/V.py | V.attach | def attach(self, num_name, write=0):
"""Open an existing vgroup given its name or its reference
number, or create a new vgroup, returning a VG instance for
that vgroup.
Args::
num_name reference number or name of the vgroup to open,
or -1 to create a new vgroup; create() can also
be called to create and name a new vgroup
write set to non-zero to open the vgroup in write mode
and to 0 to open it in readonly mode (default)
Returns::
VG instance for the vgroup
An exception is raised if an attempt is made to open
a non-existent vgroup.
C library equivalent : Vattach
"""
if isinstance(num_name, bytes):
num = self.find(num_name)
else:
num = num_name
vg_id = _C.Vattach(self._hdf_inst._id, num,
write and 'w' or 'r')
_checkErr('vattach', vg_id, "cannot attach Vgroup")
return VG(self, vg_id) | python | def attach(self, num_name, write=0):
"""Open an existing vgroup given its name or its reference
number, or create a new vgroup, returning a VG instance for
that vgroup.
Args::
num_name reference number or name of the vgroup to open,
or -1 to create a new vgroup; create() can also
be called to create and name a new vgroup
write set to non-zero to open the vgroup in write mode
and to 0 to open it in readonly mode (default)
Returns::
VG instance for the vgroup
An exception is raised if an attempt is made to open
a non-existent vgroup.
C library equivalent : Vattach
"""
if isinstance(num_name, bytes):
num = self.find(num_name)
else:
num = num_name
vg_id = _C.Vattach(self._hdf_inst._id, num,
write and 'w' or 'r')
_checkErr('vattach', vg_id, "cannot attach Vgroup")
return VG(self, vg_id) | [
"def",
"attach",
"(",
"self",
",",
"num_name",
",",
"write",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"num_name",
",",
"bytes",
")",
":",
"num",
"=",
"self",
".",
"find",
"(",
"num_name",
")",
"else",
":",
"num",
"=",
"num_name",
"vg_id",
"=",
"_C",
".",
"Vattach",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"num",
",",
"write",
"and",
"'w'",
"or",
"'r'",
")",
"_checkErr",
"(",
"'vattach'",
",",
"vg_id",
",",
"\"cannot attach Vgroup\"",
")",
"return",
"VG",
"(",
"self",
",",
"vg_id",
")"
] | Open an existing vgroup given its name or its reference
number, or create a new vgroup, returning a VG instance for
that vgroup.
Args::
num_name reference number or name of the vgroup to open,
or -1 to create a new vgroup; create() can also
be called to create and name a new vgroup
write set to non-zero to open the vgroup in write mode
and to 0 to open it in readonly mode (default)
Returns::
VG instance for the vgroup
An exception is raised if an attempt is made to open
a non-existent vgroup.
C library equivalent : Vattach | [
"Open",
"an",
"existing",
"vgroup",
"given",
"its",
"name",
"or",
"its",
"reference",
"number",
"or",
"create",
"a",
"new",
"vgroup",
"returning",
"a",
"VG",
"instance",
"for",
"that",
"vgroup",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L725-L755 | train | 237,348 |
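
A minimal sketch of creating a vgroup through this API, assuming the usual pyhdf entry points (HDF.vgstart() returning a V instance, HC open-mode flags):

from pyhdf.HDF import HDF, HC

hdf = HDF('test.hdf', HC.WRITE | HC.CREATE)   # file name illustrative
v = hdf.vgstart()
vg = v.attach(-1, write=1)                    # -1 creates a fresh vgroup
vg._name = 'measurements'
vg.detach()
v.end()
hdf.close()
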
fhs/pyhdf | pyhdf/V.py | V.create | def create(self, name):
"""Create a new vgroup, and assign it a name.
Args::
name name to assign to the new vgroup
Returns::
VG instance for the new vgroup
A create(name) call is equivalent to an attach(-1, 1) call,
followed by a call to the setname(name) method of the instance.
C library equivalent : no equivalent
"""
vg = self.attach(-1, 1)
vg._name = name
return vg | python | def create(self, name):
"""Create a new vgroup, and assign it a name.
Args::
name name to assign to the new vgroup
Returns::
VG instance for the new vgroup
A create(name) call is equivalent to an attach(-1, 1) call,
followed by a call to the setname(name) method of the instance.
C library equivalent : no equivalent
"""
vg = self.attach(-1, 1)
vg._name = name
return vg | [
"def",
"create",
"(",
"self",
",",
"name",
")",
":",
"vg",
"=",
"self",
".",
"attach",
"(",
"-",
"1",
",",
"1",
")",
"vg",
".",
"_name",
"=",
"name",
"return",
"vg"
] | Create a new vgroup, and assign it a name.
Args::
name name to assign to the new vgroup
Returns::
VG instance for the new vgroup
A create(name) call is equivalent to an attach(-1, 1) call,
followed by a call to the setname(name) method of the instance.
C library equivalent : no equivalent | [
"Create",
"a",
"new",
"vgroup",
"and",
"assign",
"it",
"a",
"name",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L757-L776 | train | 237,349 |
fhs/pyhdf | pyhdf/V.py | V.find | def find(self, name):
"""Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfind(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum | python | def find(self, name):
"""Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfind(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"refnum",
"=",
"_C",
".",
"Vfind",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"name",
")",
"if",
"not",
"refnum",
":",
"raise",
"HDF4Error",
"(",
"\"vgroup not found\"",
")",
"return",
"refnum"
] | Find a vgroup given its name, returning its reference
number if found.
Args::
name name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind | [
"Find",
"a",
"vgroup",
"given",
"its",
"name",
"returning",
"its",
"reference",
"number",
"if",
"found",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L778-L798 | train | 237,350 |
fhs/pyhdf | pyhdf/V.py | V.findclass | def findclass(self, name):
"""Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfindclass
"""
refnum = _C.Vfindclass(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum | python | def findclass(self, name):
"""Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfindclass
"""
refnum = _C.Vfindclass(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum | [
"def",
"findclass",
"(",
"self",
",",
"name",
")",
":",
"refnum",
"=",
"_C",
".",
"Vfindclass",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"name",
")",
"if",
"not",
"refnum",
":",
"raise",
"HDF4Error",
"(",
"\"vgroup not found\"",
")",
"return",
"refnum"
] | Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfindclass | [
"Find",
"a",
"vgroup",
"given",
"its",
"class",
"name",
"returning",
"its",
"reference",
"number",
"if",
"found",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L800-L820 | train | 237,351 |
fhs/pyhdf | pyhdf/V.py | V.delete | def delete(self, num_name):
"""Delete from the HDF file the vgroup identified by its
reference number or its name.
Args::
num_name either the reference number or the name of
the vgroup to delete
Returns::
None
C library equivalent : Vdelete
"""
try:
vg = self.attach(num_name, 1)
except HDF4Error as msg:
raise HDF4Error("delete: no such vgroup")
# ATTENTION: The HDF documentation says that the vgroup_id
# is passed to Vdelete(). This is wrong.
# The vgroup reference number must instead be passed.
refnum = vg._refnum
vg.detach()
_checkErr('delete', _C.Vdelete(self._hdf_inst._id, refnum),
"error deleting vgroup") | python | def delete(self, num_name):
"""Delete from the HDF file the vgroup identified by its
reference number or its name.
Args::
num_name either the reference number or the name of
the vgroup to delete
Returns::
None
C library equivalent : Vdelete
"""
try:
vg = self.attach(num_name, 1)
except HDF4Error as msg:
raise HDF4Error("delete: no such vgroup")
# ATTENTION: The HDF documentation says that the vgroup_id
# is passed to Vdelete(). This is wrong.
# The vgroup reference number must instead be passed.
refnum = vg._refnum
vg.detach()
_checkErr('delete', _C.Vdelete(self._hdf_inst._id, refnum),
"error deleting vgroup") | [
"def",
"delete",
"(",
"self",
",",
"num_name",
")",
":",
"try",
":",
"vg",
"=",
"self",
".",
"attach",
"(",
"num_name",
",",
"1",
")",
"except",
"HDF4Error",
"as",
"msg",
":",
"raise",
"HDF4Error",
"(",
"\"delete: no such vgroup\"",
")",
"# ATTENTION: The HDF documentation says that the vgroup_id",
"# is passed to Vdelete(). This is wrong.",
"# The vgroup reference number must instead be passed.",
"refnum",
"=",
"vg",
".",
"_refnum",
"vg",
".",
"detach",
"(",
")",
"_checkErr",
"(",
"'delete'",
",",
"_C",
".",
"Vdelete",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"refnum",
")",
",",
"\"error deleting vgroup\"",
")"
] | Delete from the HDF file the vgroup identified by its
reference number or its name.
Args::
num_name either the reference number or the name of
the vgroup to delete
Returns::
None
C library equivalent : Vdelete | [
"Delete",
"from",
"the",
"HDF",
"file",
"the",
"vgroup",
"identified",
"by",
"its",
"reference",
"number",
"or",
"its",
"name",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L822-L849 | train | 237,352 |
fhs/pyhdf | pyhdf/V.py | V.getid | def getid(self, ref):
"""Obtain the reference number of the vgroup following the
vgroup with the given reference number .
Args::
ref reference number of the vgroup after which to search;
set to -1 to start the search at the start of
the HDF file
Returns::
reference number of the vgroup past the one identified by 'ref'
An exception is raised if the end of the vgroup list is reached.
C library equivalent : Vgetid
"""
num = _C.Vgetid(self._hdf_inst._id, ref)
_checkErr('getid', num, "bad arguments or last vgroup reached")
return num | python | def getid(self, ref):
"""Obtain the reference number of the vgroup following the
vgroup with the given reference number .
Args::
ref reference number of the vgroup after which to search;
set to -1 to start the search at the start of
the HDF file
Returns::
reference number of the vgroup past the one identified by 'ref'
An exception is raised if the end of the vgroup list is reached.
C library equivalent : Vgetid
"""
num = _C.Vgetid(self._hdf_inst._id, ref)
_checkErr('getid', num, "bad arguments or last vgroup reached")
return num | [
"def",
"getid",
"(",
"self",
",",
"ref",
")",
":",
"num",
"=",
"_C",
".",
"Vgetid",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"ref",
")",
"_checkErr",
"(",
"'getid'",
",",
"num",
",",
"\"bad arguments or last vgroup reached\"",
")",
"return",
"num"
] | Obtain the reference number of the vgroup following the
vgroup with the given reference number .
Args::
ref reference number of the vgroup after which to search;
set to -1 to start the search at the start of
the HDF file
Returns::
reference number of the vgroup past the one identified by 'ref'
An exception is raised if the end of the vgroup list is reached.
C library equivalent : Vgetid | [
"Obtain",
"the",
"reference",
"number",
"of",
"the",
"vgroup",
"following",
"the",
"vgroup",
"with",
"the",
"given",
"reference",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L851-L872 | train | 237,353 |
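
getid() supports the standard scan-all-vgroups loop: seed with -1 and stop on the HDF4Error raised past the last vgroup. A sketch, assuming v is an open V instance:

from pyhdf.error import HDF4Error

ref = -1
try:
    while True:
        ref = v.getid(ref)
        vg = v.attach(ref)
        print(vg._name)
        vg.detach()
except HDF4Error:
    pass                       # no vgroup follows the last one
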
fhs/pyhdf | pyhdf/V.py | VG.insert | def insert(self, inst):
"""Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
"""
if isinstance(inst, VD):
id = inst._id
elif isinstance(inst, VG):
id = inst._id
else:
raise HDF4Error("insrt: bad argument")
index = _C.Vinsert(self._id, id)
_checkErr('insert', index, "cannot insert in vgroup")
return index | python | def insert(self, inst):
"""Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert
"""
if isinstance(inst, VD):
id = inst._id
elif isinstance(inst, VG):
id = inst._id
else:
raise HDF4Error("insrt: bad argument")
index = _C.Vinsert(self._id, id)
_checkErr('insert', index, "cannot insert in vgroup")
return index | [
"def",
"insert",
"(",
"self",
",",
"inst",
")",
":",
"if",
"isinstance",
"(",
"inst",
",",
"VD",
")",
":",
"id",
"=",
"inst",
".",
"_id",
"elif",
"isinstance",
"(",
"inst",
",",
"VG",
")",
":",
"id",
"=",
"inst",
".",
"_id",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"insrt: bad argument\"",
")",
"index",
"=",
"_C",
".",
"Vinsert",
"(",
"self",
".",
"_id",
",",
"id",
")",
"_checkErr",
"(",
"'insert'",
",",
"index",
",",
"\"cannot insert in vgroup\"",
")",
"return",
"index"
] | Insert a vdata or a vgroup in the vgroup.
Args::
inst vdata or vgroup instance to add
Returns::
index of the inserted vdata or vgroup (0 based)
C library equivalent : Vinsert | [
"Insert",
"a",
"vdata",
"or",
"a",
"vgroup",
"in",
"the",
"vgroup",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L994-L1017 | train | 237,354 |
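
Sketch of linking a freshly created vgroup under a parent; both handles are assumed attached in write mode:

child = v.attach(-1, write=1)
child._name = 'run_001'
index = parent.insert(child)   # parent: a VG attached with write=1
child.detach()
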
fhs/pyhdf | pyhdf/V.py | VG.add | def add(self, tag, ref):
"""Add to the vgroup an object identified by its tag and
reference number.
Args::
tag tag of the object to add
ref reference number of the object to add
Returns::
total number of objects in the vgroup after the addition
C library equivalent : Vaddtagref
"""
n = _C.Vaddtagref(self._id, tag, ref)
_checkErr('addtagref', n, 'invalid arguments')
return n | python | def add(self, tag, ref):
"""Add to the vgroup an object identified by its tag and
reference number.
Args::
tag tag of the object to add
ref reference number of the object to add
Returns::
total number of objects in the vgroup after the addition
C library equivalent : Vaddtagref
"""
n = _C.Vaddtagref(self._id, tag, ref)
_checkErr('addtagref', n, 'invalid arguments')
return n | [
"def",
"add",
"(",
"self",
",",
"tag",
",",
"ref",
")",
":",
"n",
"=",
"_C",
".",
"Vaddtagref",
"(",
"self",
".",
"_id",
",",
"tag",
",",
"ref",
")",
"_checkErr",
"(",
"'addtagref'",
",",
"n",
",",
"'invalid arguments'",
")",
"return",
"n"
] | Add to the vgroup an object identified by its tag and
reference number.
Args::
tag tag of the object to add
ref reference number of the object to add
Returns::
total number of objects in the vgroup after the addition
C library equivalent : Vaddtagref | [
"Add",
"to",
"the",
"vgroup",
"an",
"object",
"identified",
"by",
"its",
"tag",
"and",
"reference",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1019-L1037 | train | 237,355 |
fhs/pyhdf | pyhdf/V.py | VG.delete | def delete(self, tag, ref):
"""Delete from the vgroup the member identified by its tag
and reference number.
Args::
tag tag of the member to delete
ref reference number of the member to delete
Returns::
None
Only the link of the member with the vgroup is deleted.
The member object is not deleted.
C library equivalent : Vdeletetagref
"""
_checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
"error deleting member") | python | def delete(self, tag, ref):
"""Delete from the vgroup the member identified by its tag
and reference number.
Args::
tag tag of the member to delete
ref reference number of the member to delete
Returns::
None
Only the link of the member with the vgroup is deleted.
The member object is not deleted.
C library equivalent : Vdeletetagref
"""
_checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
"error deleting member") | [
"def",
"delete",
"(",
"self",
",",
"tag",
",",
"ref",
")",
":",
"_checkErr",
"(",
"'delete'",
",",
"_C",
".",
"Vdeletetagref",
"(",
"self",
".",
"_id",
",",
"tag",
",",
"ref",
")",
",",
"\"error deleting member\"",
")"
] | Delete from the vgroup the member identified by its tag
and reference number.
Args::
tag tag of the member to delete
ref reference number of the member to delete
Returns::
None
Only the link of the member with the vgroup is deleted.
The member object is not deleted.
C library equivalent : Vdeletetagref | [
"Delete",
"from",
"the",
"vgroup",
"the",
"member",
"identified",
"by",
"its",
"tag",
"and",
"reference",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1039-L1059 | train | 237,356 |
fhs/pyhdf | pyhdf/V.py | VG.tagref | def tagref(self, index):
"""Get the tag and reference number of a vgroup member,
given the index number of that member.
Args::
index member index (0 based)
Returns::
2-element tuple:
- member tag
- member reference number
C library equivalent : Vgettagref
"""
status, tag, ref = _C.Vgettagref(self._id, index)
_checkErr('tagref', status, "illegal arguments")
return tag, ref | python | def tagref(self, index):
"""Get the tag and reference number of a vgroup member,
given the index number of that member.
Args::
index member index (0 based)
Returns::
2-element tuple:
- member tag
- member reference number
C library equivalent : Vgettagref
"""
status, tag, ref = _C.Vgettagref(self._id, index)
_checkErr('tagref', status, "illegal arguments")
return tag, ref | [
"def",
"tagref",
"(",
"self",
",",
"index",
")",
":",
"status",
",",
"tag",
",",
"ref",
"=",
"_C",
".",
"Vgettagref",
"(",
"self",
".",
"_id",
",",
"index",
")",
"_checkErr",
"(",
"'tagref'",
",",
"status",
",",
"\"illegal arguments\"",
")",
"return",
"tag",
",",
"ref"
] | Get the tag and reference number of a vgroup member,
given the index number of that member.
Args::
index member index (0 based)
Returns::
2-element tuple:
- member tag
- member reference number
C library equivalent : Vgettagref | [
"Get",
"the",
"tag",
"and",
"reference",
"number",
"of",
"a",
"vgroup",
"member",
"given",
"the",
"index",
"number",
"of",
"that",
"member",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1079-L1098 | train | 237,357 |
fhs/pyhdf | pyhdf/V.py | VG.tagrefs | def tagrefs(self):
"""Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs
"""
n = self._nmembers
ret = []
if n:
tags = _C.array_int32(n)
refs = _C.array_int32(n)
k = _C.Vgettagrefs(self._id, tags, refs, n)
_checkErr('tagrefs', k, "error getting tags and refs")
for m in xrange(k):
ret.append((tags[m], refs[m]))
return ret | python | def tagrefs(self):
"""Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs
"""
n = self._nmembers
ret = []
if n:
tags = _C.array_int32(n)
refs = _C.array_int32(n)
k = _C.Vgettagrefs(self._id, tags, refs, n)
_checkErr('tagrefs', k, "error getting tags and refs")
for m in xrange(k):
ret.append((tags[m], refs[m]))
return ret | [
"def",
"tagrefs",
"(",
"self",
")",
":",
"n",
"=",
"self",
".",
"_nmembers",
"ret",
"=",
"[",
"]",
"if",
"n",
":",
"tags",
"=",
"_C",
".",
"array_int32",
"(",
"n",
")",
"refs",
"=",
"_C",
".",
"array_int32",
"(",
"n",
")",
"k",
"=",
"_C",
".",
"Vgettagrefs",
"(",
"self",
".",
"_id",
",",
"tags",
",",
"refs",
",",
"n",
")",
"_checkErr",
"(",
"'tagrefs'",
",",
"k",
",",
"\"error getting tags and refs\"",
")",
"for",
"m",
"in",
"xrange",
"(",
"k",
")",
":",
"ret",
".",
"append",
"(",
"(",
"tags",
"[",
"m",
"]",
",",
"refs",
"[",
"m",
"]",
")",
")",
"return",
"ret"
] | Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs | [
"Get",
"the",
"tags",
"and",
"reference",
"numbers",
"of",
"all",
"the",
"vgroup",
"members",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1100-L1124 | train | 237,358 |
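
tagrefs() is the usual way to walk a vgroup's members and dispatch on type. A sketch assuming the HC tag constants from pyhdf.HDF (DFTAG_VG marks sub-vgroups, DFTAG_VH vdatas):

from pyhdf.HDF import HC

for tag, ref in vg.tagrefs():
    if tag == HC.DFTAG_VG:
        sub = v.attach(ref)
        print('vgroup:', sub._name)
        sub.detach()
    elif tag == HC.DFTAG_VH:
        print('vdata ref:', ref)
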
fhs/pyhdf | pyhdf/V.py | VG.inqtagref | def inqtagref(self, tag, ref):
"""Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref
"""
return _C.Vinqtagref(self._id, tag, ref) | python | def inqtagref(self, tag, ref):
"""Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref
"""
return _C.Vinqtagref(self._id, tag, ref) | [
"def",
"inqtagref",
"(",
"self",
",",
"tag",
",",
"ref",
")",
":",
"return",
"_C",
".",
"Vinqtagref",
"(",
"self",
".",
"_id",
",",
"tag",
",",
"ref",
")"
] | Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref | [
"Determines",
"if",
"an",
"object",
"identified",
"by",
"its",
"tag",
"and",
"reference",
"number",
"belongs",
"to",
"the",
"vgroup",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1126-L1143 | train | 237,359 |
fhs/pyhdf | pyhdf/V.py | VG.nrefs | def nrefs(self, tag):
"""Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs
"""
n = _C.Vnrefs(self._id, tag)
_checkErr('nrefs', n, "bad arguments")
return n | python | def nrefs(self, tag):
"""Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs
"""
n = _C.Vnrefs(self._id, tag)
_checkErr('nrefs', n, "bad arguments")
return n | [
"def",
"nrefs",
"(",
"self",
",",
"tag",
")",
":",
"n",
"=",
"_C",
".",
"Vnrefs",
"(",
"self",
".",
"_id",
",",
"tag",
")",
"_checkErr",
"(",
"'nrefs'",
",",
"n",
",",
"\"bad arguments\"",
")",
"return",
"n"
] | Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs | [
"Determine",
"the",
"number",
"of",
"tags",
"of",
"a",
"given",
"type",
"in",
"a",
"vgroup",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1145-L1161 | train | 237,360 |
fhs/pyhdf | pyhdf/V.py | VG.attrinfo | def attrinfo(self):
"""Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic | python | def attrinfo(self):
"""Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic | [
"def",
"attrinfo",
"(",
"self",
")",
":",
"dic",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"self",
".",
"_nattrs",
")",
":",
"att",
"=",
"self",
".",
"attr",
"(",
"n",
")",
"name",
",",
"type",
",",
"order",
",",
"size",
"=",
"att",
".",
"info",
"(",
")",
"dic",
"[",
"name",
"]",
"=",
"(",
"type",
",",
"order",
",",
"att",
".",
"get",
"(",
")",
",",
"size",
")",
"return",
"dic"
] | Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent | [
"Return",
"info",
"about",
"all",
"the",
"vgroup",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1218-L1245 | train | 237,361 |
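
Dumping a vgroup's attributes using the (type, order, value, size) tuples described above; vg is any attached VG instance:

for name, (typ, order, value, size) in vg.attrinfo().items():
    print('%s: type=%d order=%d value=%r (%d bytes)'
          % (name, typ, order, value, size))
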
fhs/pyhdf | pyhdf/V.py | VG.findattr | def findattr(self, name):
"""Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att | python | def findattr(self, name):
"""Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att | [
"def",
"findattr",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"att",
"=",
"self",
".",
"attr",
"(",
"name",
")",
"if",
"att",
".",
"_index",
"is",
"None",
":",
"att",
"=",
"None",
"except",
"HDF4Error",
":",
"att",
"=",
"None",
"return",
"att"
] | Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr | [
"Search",
"the",
"vgroup",
"for",
"a",
"given",
"attribute",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1248-L1269 | train | 237,362 |
fhs/pyhdf | pyhdf/SD.py | SDAttr.index | def index(self):
"""Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr
"""
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute name')
return self._index | python | def index(self):
"""Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr
"""
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute name')
return self._index | [
"def",
"index",
"(",
"self",
")",
":",
"self",
".",
"_index",
"=",
"_C",
".",
"SDfindattr",
"(",
"self",
".",
"_obj",
".",
"_id",
",",
"self",
".",
"_name",
")",
"_checkErr",
"(",
"'find'",
",",
"self",
".",
"_index",
",",
"'illegal attribute name'",
")",
"return",
"self",
".",
"_index"
] | Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr | [
"Retrieve",
"the",
"attribute",
"index",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1178-L1194 | train | 237,363 |
fhs/pyhdf | pyhdf/SD.py | SD.end | def end(self):
"""End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend
"""
status = _C.SDend(self._id)
_checkErr('end', status, "cannot execute")
self._id = None | python | def end(self):
"""End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend
"""
status = _C.SDend(self._id)
_checkErr('end', status, "cannot execute")
self._id = None | [
"def",
"end",
"(",
"self",
")",
":",
"status",
"=",
"_C",
".",
"SDend",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'end'",
",",
"status",
",",
"\"cannot execute\"",
")",
"self",
".",
"_id",
"=",
"None"
] | End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend | [
"End",
"access",
"to",
"the",
"SD",
"interface",
"and",
"close",
"the",
"HDF",
"file",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1457-L1477 | train | 237,364 |
fhs/pyhdf | pyhdf/SD.py | SD.info | def info(self):
"""Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo
"""
status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id)
_checkErr('info', status, "cannot execute")
return n_datasets, n_file_attrs | python | def info(self):
"""Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo
"""
status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id)
_checkErr('info', status, "cannot execute")
return n_datasets, n_file_attrs | [
"def",
"info",
"(",
"self",
")",
":",
"status",
",",
"n_datasets",
",",
"n_file_attrs",
"=",
"_C",
".",
"SDfileinfo",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"\"cannot execute\"",
")",
"return",
"n_datasets",
",",
"n_file_attrs"
] | Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo | [
"Retrieve",
"information",
"about",
"the",
"SD",
"interface",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1479-L1497 | train | 237,365 |
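
info() pairs naturally with select() (documented at the end of this section) to visit every dataset in a file. A sketch assuming the usual SDS methods info() and endaccess():

from pyhdf.SD import SD, SDC

sd = SD('test.hdf', SDC.READ)           # file name illustrative
n_dsets, n_attrs = sd.info()
for idx in range(n_dsets):
    sds = sd.select(idx)
    name, rank, dims, dtype, nattrs = sds.info()
    print(name, dims)
    sds.endaccess()
sd.end()
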
fhs/pyhdf | pyhdf/SD.py | SD.nametoindex | def nametoindex(self, sds_name):
"""Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex
"""
sds_idx = _C.SDnametoindex(self._id, sds_name)
_checkErr('nametoindex', sds_idx, 'non existent SDS')
return sds_idx | python | def nametoindex(self, sds_name):
"""Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex
"""
sds_idx = _C.SDnametoindex(self._id, sds_name)
_checkErr('nametoindex', sds_idx, 'non existent SDS')
return sds_idx | [
"def",
"nametoindex",
"(",
"self",
",",
"sds_name",
")",
":",
"sds_idx",
"=",
"_C",
".",
"SDnametoindex",
"(",
"self",
".",
"_id",
",",
"sds_name",
")",
"_checkErr",
"(",
"'nametoindex'",
",",
"sds_idx",
",",
"'non existent SDS'",
")",
"return",
"sds_idx"
] | Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex | [
"Return",
"the",
"index",
"number",
"of",
"a",
"dataset",
"given",
"the",
"dataset",
"name",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1499-L1515 | train | 237,366 |
fhs/pyhdf | pyhdf/SD.py | SD.reftoindex | def reftoindex(self, sds_ref):
"""Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex
"""
sds_idx = _C.SDreftoindex(self._id, sds_ref)
_checkErr('reftoindex', sds_idx, 'illegal SDS ref number')
return sds_idx | python | def reftoindex(self, sds_ref):
"""Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex
"""
sds_idx = _C.SDreftoindex(self._id, sds_ref)
_checkErr('reftoindex', sds_idx, 'illegal SDS ref number')
return sds_idx | [
"def",
"reftoindex",
"(",
"self",
",",
"sds_ref",
")",
":",
"sds_idx",
"=",
"_C",
".",
"SDreftoindex",
"(",
"self",
".",
"_id",
",",
"sds_ref",
")",
"_checkErr",
"(",
"'reftoindex'",
",",
"sds_idx",
",",
"'illegal SDS ref number'",
")",
"return",
"sds_idx"
] | Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex | [
"Returns",
"the",
"index",
"number",
"of",
"a",
"dataset",
"given",
"the",
"dataset",
"reference",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1517-L1534 | train | 237,367 |
fhs/pyhdf | pyhdf/SD.py | SD.setfillmode | def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if not fill_mode in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode | python | def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if not fill_mode in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode | [
"def",
"setfillmode",
"(",
"self",
",",
"fill_mode",
")",
":",
"if",
"not",
"fill_mode",
"in",
"[",
"SDC",
".",
"FILL",
",",
"SDC",
".",
"NOFILL",
"]",
":",
"raise",
"HDF4Error",
"(",
"\"bad fill mode\"",
")",
"old_mode",
"=",
"_C",
".",
"SDsetfillmode",
"(",
"self",
".",
"_id",
",",
"fill_mode",
")",
"_checkErr",
"(",
"'setfillmode'",
",",
"old_mode",
",",
"'cannot execute'",
")",
"return",
"old_mode"
] | Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode | [
"Set",
"the",
"fill",
"mode",
"for",
"all",
"the",
"datasets",
"in",
"the",
"file",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1536-L1558 | train | 237,368 |
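A hedged sketch of setfillmode on a file opened for writing; 'out.hdf' is an assumed output file name, not one used by the library:

    from pyhdf.SD import SD, SDC

    sd = SD('out.hdf', SDC.WRITE | SDC.CREATE)  # create/open a hypothetical file
    old_mode = sd.setfillmode(SDC.NOFILL)       # stop writing fill values by default
    print('previous fill mode:', old_mode)      # SDC.FILL or SDC.NOFILL
    sd.end()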
fhs/pyhdf | pyhdf/SD.py | SD.select | def select(self, name_or_index):
"""Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect
"""
if isinstance(name_or_index, type(1)):
idx = name_or_index
else:
try:
idx = self.nametoindex(name_or_index)
except HDF4Error:
raise HDF4Error("select: non-existent dataset")
id = _C.SDselect(self._id, idx)
_checkErr('select', id, "cannot execute")
return SDS(self, id) | python | def select(self, name_or_index):
"""Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect
"""
if isinstance(name_or_index, type(1)):
idx = name_or_index
else:
try:
idx = self.nametoindex(name_or_index)
except HDF4Error:
raise HDF4Error("select: non-existent dataset")
id = _C.SDselect(self._id, idx)
_checkErr('select', id, "cannot execute")
return SDS(self, id) | [
"def",
"select",
"(",
"self",
",",
"name_or_index",
")",
":",
"if",
"isinstance",
"(",
"name_or_index",
",",
"type",
"(",
"1",
")",
")",
":",
"idx",
"=",
"name_or_index",
"else",
":",
"try",
":",
"idx",
"=",
"self",
".",
"nametoindex",
"(",
"name_or_index",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"\"select: non-existent dataset\"",
")",
"id",
"=",
"_C",
".",
"SDselect",
"(",
"self",
".",
"_id",
",",
"idx",
")",
"_checkErr",
"(",
"'select'",
",",
"id",
",",
"\"cannot execute\"",
")",
"return",
"SDS",
"(",
"self",
",",
"id",
")"
] | Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect | [
"Locate",
"a",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1603-L1626 | train | 237,369 |
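Both ways of calling select, sketched under the assumption that the file holds a dataset named 'Temperature'; the name is illustrative only:

    from pyhdf.SD import SD, SDC

    sd = SD('sample.hdf', SDC.READ)      # assumed file name
    by_name = sd.select('Temperature')   # 'Temperature' is an assumed dataset name
    by_index = sd.select(0)              # first dataset in the file
    print(by_name.info())                # (name, rank, shape, type, n_attrs)
    by_name.endaccess()
    by_index.endaccess()
    sd.end()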
fhs/pyhdf | pyhdf/SD.py | SD.attributes | def attributes(self, full=0):
"""Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionary if no global attribute defined
Otherwise, dictionary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of global attributes.
nsds, natts = self.info()
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res | python | def attributes(self, full=0):
"""Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionary if no global attribute defined
Otherwise, dictionary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of global attributes.
nsds, natts = self.info()
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res | [
"def",
"attributes",
"(",
"self",
",",
"full",
"=",
"0",
")",
":",
"# Get the number of global attributes.",
"nsds",
",",
"natts",
"=",
"self",
".",
"info",
"(",
")",
"# Inquire each attribute",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"natts",
")",
":",
"a",
"=",
"self",
".",
"attr",
"(",
"n",
")",
"name",
",",
"aType",
",",
"nVal",
"=",
"a",
".",
"info",
"(",
")",
"if",
"full",
":",
"res",
"[",
"name",
"]",
"=",
"(",
"a",
".",
"get",
"(",
")",
",",
"a",
".",
"index",
"(",
")",
",",
"aType",
",",
"nVal",
")",
"else",
":",
"res",
"[",
"name",
"]",
"=",
"a",
".",
"get",
"(",
")",
"return",
"res"
] | Return a dictionary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionary if no global attribute defined
Otherwise, dictionary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"every",
"global",
"attribute",
"attached",
"to",
"the",
"SD",
"interface",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1651-L1689 | train | 237,370 |
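A short sketch of both forms of the attributes call; 'sample.hdf' is again an assumed file name:

    from pyhdf.SD import SD, SDC

    sd = SD('sample.hdf', SDC.READ)
    for name, value in sd.attributes().items():   # values only
        print(name, '=', value)
    for name, (value, index, atype, length) in sd.attributes(full=1).items():
        print(name, value, index, atype, length)  # full per-attribute info
    sd.end()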
fhs/pyhdf | pyhdf/SD.py | SD.datasets | def datasets(self):
"""Return a dictionnary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionary if no dataset is defined.
Otherwise, dictionary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension current length
- dataset type
- dataset index number
C library equivalent : no equivalent
"""
# Get number of datasets
nDs = self.info()[0]
# Inquire each var
res = {}
for n in range(nDs):
# Get dataset info.
v = self.select(n)
vName, vRank, vLen, vType, vAtt = v.info()
if vRank < 2: # need a sequence
vLen = [vLen]
# Get dimension info.
dimNames = []
dimLengths = []
for dimNum in range(vRank):
d = v.dim(dimNum)
dimNames.append(d.info()[0])
dimLengths.append(vLen[dimNum])
res[vName] = (tuple(dimNames), tuple(dimLengths),
vType, n)
return res | python | def datasets(self):
"""Return a dictionnary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionary if no dataset is defined.
Otherwise, dictionary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension current length
- dataset type
- dataset index number
C library equivalent : no equivalent
"""
# Get number of datasets
nDs = self.info()[0]
# Inquire each var
res = {}
for n in range(nDs):
# Get dataset info.
v = self.select(n)
vName, vRank, vLen, vType, vAtt = v.info()
if vRank < 2: # need a sequence
vLen = [vLen]
# Get dimension info.
dimNames = []
dimLengths = []
for dimNum in range(vRank):
d = v.dim(dimNum)
dimNames.append(d.info()[0])
dimLengths.append(vLen[dimNum])
res[vName] = (tuple(dimNames), tuple(dimLengths),
vType, n)
return res | [
"def",
"datasets",
"(",
"self",
")",
":",
"# Get number of datasets",
"nDs",
"=",
"self",
".",
"info",
"(",
")",
"[",
"0",
"]",
"# Inquire each var",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"nDs",
")",
":",
"# Get dataset info.",
"v",
"=",
"self",
".",
"select",
"(",
"n",
")",
"vName",
",",
"vRank",
",",
"vLen",
",",
"vType",
",",
"vAtt",
"=",
"v",
".",
"info",
"(",
")",
"if",
"vRank",
"<",
"2",
":",
"# need a sequence",
"vLen",
"=",
"[",
"vLen",
"]",
"# Get dimension info.",
"dimNames",
"=",
"[",
"]",
"dimLengths",
"=",
"[",
"]",
"for",
"dimNum",
"in",
"range",
"(",
"vRank",
")",
":",
"d",
"=",
"v",
".",
"dim",
"(",
"dimNum",
")",
"dimNames",
".",
"append",
"(",
"d",
".",
"info",
"(",
")",
"[",
"0",
"]",
")",
"dimLengths",
".",
"append",
"(",
"vLen",
"[",
"dimNum",
"]",
")",
"res",
"[",
"vName",
"]",
"=",
"(",
"tuple",
"(",
"dimNames",
")",
",",
"tuple",
"(",
"dimLengths",
")",
",",
"vType",
",",
"n",
")",
"return",
"res"
] | Return a dictionary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionary if no dataset is defined.
Otherwise, dictionary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension current length
- dataset type
- dataset index number
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"all",
"the",
"file",
"datasets",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1691-L1736 | train | 237,371 |
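A sketch that walks the dictionary returned by datasets(); the tuple layout follows the docstring above, and the file name is assumed:

    from pyhdf.SD import SD, SDC

    sd = SD('sample.hdf', SDC.READ)
    for name, (dims, shape, dtype, index) in sd.datasets().items():
        print(name, dims, shape, dtype, index)
    sd.end()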
fhs/pyhdf | pyhdf/SD.py | SDS.endaccess | def endaccess(self):
"""Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess
"""
status = _C.SDendaccess(self._id)
_checkErr('endaccess', status, "cannot execute")
self._id = None | python | def endaccess(self):
"""Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess
"""
status = _C.SDendaccess(self._id)
_checkErr('endaccess', status, "cannot execute")
self._id = None | [
"def",
"endaccess",
"(",
"self",
")",
":",
"status",
"=",
"_C",
".",
"SDendaccess",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'endaccess'",
",",
"status",
",",
"\"cannot execute\"",
")",
"self",
".",
"_id",
"=",
"None"
] | Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess | [
"Terminates",
"access",
"to",
"the",
"SDS",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1817-L1837 | train | 237,372 |
fhs/pyhdf | pyhdf/SD.py | SDS.dim | def dim(self, dim_index):
"""Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid
"""
id = _C.SDgetdimid(self._id, dim_index)
_checkErr('dim', id, 'invalid SDS identifier or dimension index')
return SDim(self, id, dim_index) | python | def dim(self, dim_index):
"""Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid
"""
id = _C.SDgetdimid(self._id, dim_index)
_checkErr('dim', id, 'invalid SDS identifier or dimension index')
return SDim(self, id, dim_index) | [
"def",
"dim",
"(",
"self",
",",
"dim_index",
")",
":",
"id",
"=",
"_C",
".",
"SDgetdimid",
"(",
"self",
".",
"_id",
",",
"dim_index",
")",
"_checkErr",
"(",
"'dim'",
",",
"id",
",",
"'invalid SDS identifier or dimension index'",
")",
"return",
"SDim",
"(",
"self",
",",
"id",
",",
"dim_index",
")"
] | Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid | [
"Get",
"an",
"SDim",
"instance",
"given",
"a",
"dimension",
"index",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1840-L1851 | train | 237,373 |
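A sketch iterating over every dimension of a dataset via dim(); the tuple returned by SDim.info() (name, length, scale type, attribute count) is assumed from the library's documented behaviour:

    from pyhdf.SD import SD, SDC

    sd = SD('sample.hdf', SDC.READ)   # assumed file name
    sds = sd.select(0)
    rank = sds.info()[1]              # number of dimensions
    for i in range(rank):
        print(sds.dim(i).info())      # per-dimension info tuple
    sds.endaccess()
    sd.end()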
fhs/pyhdf | pyhdf/SD.py | SDS.get | def get(self, start=None, count=None, stride=None):
"""Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('get : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('get : start, stride or count ' \
'do not match SDS rank')
for n in range(rank):
if start[n] < 0 or start[n] + \
(abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
raise HDF4Error('get arguments violate ' \
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('get cannot currently deal with '\
'the SDS data type')
return _C._SDreaddata_0(self._id, data_type, start, count, stride) | python | def get(self, start=None, count=None, stride=None):
"""Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('get : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('get : start, stride or count ' \
'do not match SDS rank')
for n in range(rank):
if start[n] < 0 or start[n] + \
(abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
raise HDF4Error('get arguments violate ' \
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('get cannot currently deal with '\
'the SDS data type')
return _C._SDreaddata_0(self._id, data_type, start, count, stride) | [
"def",
"get",
"(",
"self",
",",
"start",
"=",
"None",
",",
"count",
"=",
"None",
",",
"stride",
"=",
"None",
")",
":",
"# Obtain SDS info.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"if",
"isinstance",
"(",
"dim_sizes",
",",
"type",
"(",
"1",
")",
")",
":",
"dim_sizes",
"=",
"[",
"dim_sizes",
"]",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'get : cannot execute'",
")",
"# Validate args.",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"[",
"0",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"start",
",",
"type",
"(",
"1",
")",
")",
":",
"start",
"=",
"[",
"start",
"]",
"if",
"count",
"is",
"None",
":",
"count",
"=",
"dim_sizes",
"if",
"count",
"[",
"0",
"]",
"==",
"0",
":",
"count",
"[",
"0",
"]",
"=",
"1",
"elif",
"isinstance",
"(",
"count",
",",
"type",
"(",
"1",
")",
")",
":",
"count",
"=",
"[",
"count",
"]",
"if",
"stride",
"is",
"None",
":",
"stride",
"=",
"[",
"1",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"stride",
",",
"type",
"(",
"1",
")",
")",
":",
"stride",
"=",
"[",
"stride",
"]",
"if",
"len",
"(",
"start",
")",
"!=",
"rank",
"or",
"len",
"(",
"count",
")",
"!=",
"rank",
"or",
"len",
"(",
"stride",
")",
"!=",
"rank",
":",
"raise",
"HDF4Error",
"(",
"'get : start, stride or count '",
"'do not match SDS rank'",
")",
"for",
"n",
"in",
"range",
"(",
"rank",
")",
":",
"if",
"start",
"[",
"n",
"]",
"<",
"0",
"or",
"start",
"[",
"n",
"]",
"+",
"(",
"abs",
"(",
"count",
"[",
"n",
"]",
")",
"-",
"1",
")",
"*",
"stride",
"[",
"n",
"]",
">=",
"dim_sizes",
"[",
"n",
"]",
":",
"raise",
"HDF4Error",
"(",
"'get arguments violate '",
"'the size (%d) of dimension %d'",
"%",
"(",
"dim_sizes",
"[",
"n",
"]",
",",
"n",
")",
")",
"if",
"not",
"data_type",
"in",
"SDC",
".",
"equivNumericTypes",
":",
"raise",
"HDF4Error",
"(",
"'get cannot currrently deal with '",
"'the SDS data type'",
")",
"return",
"_C",
".",
"_SDreaddata_0",
"(",
"self",
".",
"_id",
",",
"data_type",
",",
"start",
",",
"count",
",",
"stride",
")"
] | Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access". | [
"Read",
"data",
"from",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1853-L1920 | train | 237,374 |
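A sketch of the three read styles described above; the dataset name is illustrative, and it is assumed to be 2-D and at least 3 x 8 so the strided read stays in bounds:

    from pyhdf.SD import SD, SDC

    sd = SD('sample.hdf', SDC.READ)        # assumed file name
    sds = sd.select('Temperature')         # assumed dataset name
    whole = sds.get()                      # whole contents as a numpy array
    sub = sds.get(start=[0, 0],            # 3 x 4 sub-array, every 2nd column
                  count=[3, 4],
                  stride=[1, 2])
    first_row = sds[0, :]                  # equivalent slicing notation
    sds.endaccess()
    sd.end()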
fhs/pyhdf | pyhdf/SD.py | SDS.set | def set(self, data, start=None, count=None, stride=None):
"""Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
nested sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('set : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('set : start, stride or count '\
'do not match SDS rank')
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error('set arguments violate '\
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
# ??? Check support for UINT16
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('set cannot currently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride) | python | def set(self, data, start=None, count=None, stride=None):
"""Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
nested sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('set : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('set : start, stride or count '\
'do not match SDS rank')
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error('set arguments violate '\
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
# ??? Check support for UINT16
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('set cannot currently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride) | [
"def",
"set",
"(",
"self",
",",
"data",
",",
"start",
"=",
"None",
",",
"count",
"=",
"None",
",",
"stride",
"=",
"None",
")",
":",
"# Obtain SDS info.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"if",
"isinstance",
"(",
"dim_sizes",
",",
"type",
"(",
"1",
")",
")",
":",
"dim_sizes",
"=",
"[",
"dim_sizes",
"]",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'set : cannot execute'",
")",
"# Validate args.",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"[",
"0",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"start",
",",
"type",
"(",
"1",
")",
")",
":",
"start",
"=",
"[",
"start",
"]",
"if",
"count",
"is",
"None",
":",
"count",
"=",
"dim_sizes",
"if",
"count",
"[",
"0",
"]",
"==",
"0",
":",
"count",
"[",
"0",
"]",
"=",
"1",
"elif",
"isinstance",
"(",
"count",
",",
"type",
"(",
"1",
")",
")",
":",
"count",
"=",
"[",
"count",
"]",
"if",
"stride",
"is",
"None",
":",
"stride",
"=",
"[",
"1",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"stride",
",",
"type",
"(",
"1",
")",
")",
":",
"stride",
"=",
"[",
"stride",
"]",
"if",
"len",
"(",
"start",
")",
"!=",
"rank",
"or",
"len",
"(",
"count",
")",
"!=",
"rank",
"or",
"len",
"(",
"stride",
")",
"!=",
"rank",
":",
"raise",
"HDF4Error",
"(",
"'set : start, stride or count '",
"'do not match SDS rank'",
")",
"unlimited",
"=",
"self",
".",
"isrecord",
"(",
")",
"for",
"n",
"in",
"range",
"(",
"rank",
")",
":",
"ok",
"=",
"1",
"if",
"start",
"[",
"n",
"]",
"<",
"0",
":",
"ok",
"=",
"0",
"elif",
"n",
">",
"0",
"or",
"not",
"unlimited",
":",
"if",
"start",
"[",
"n",
"]",
"+",
"(",
"abs",
"(",
"count",
"[",
"n",
"]",
")",
"-",
"1",
")",
"*",
"stride",
"[",
"n",
"]",
">=",
"dim_sizes",
"[",
"n",
"]",
":",
"ok",
"=",
"0",
"if",
"not",
"ok",
":",
"raise",
"HDF4Error",
"(",
"'set arguments violate '",
"'the size (%d) of dimension %d'",
"%",
"(",
"dim_sizes",
"[",
"n",
"]",
",",
"n",
")",
")",
"# ??? Check support for UINT16",
"if",
"not",
"data_type",
"in",
"SDC",
".",
"equivNumericTypes",
":",
"raise",
"HDF4Error",
"(",
"'set cannot currrently deal '",
"'with the SDS data type'",
")",
"_C",
".",
"_SDwritedata_0",
"(",
"self",
".",
"_id",
",",
"data_type",
",",
"start",
",",
"count",
",",
"data",
",",
"stride",
")"
] | Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
nested sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access". | [
"Write",
"data",
"to",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1922-L2001 | train | 237,375 |
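A write sketch pairing create with set; 'out.hdf' and the 'temp' dataset are illustrative names:

    import numpy as np
    from pyhdf.SD import SD, SDC

    sd = SD('out.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (3, 5))   # hypothetical 3 x 5 dataset
    data = np.arange(15, dtype=np.float32).reshape(3, 5)
    sds.set(data)                                  # write everything at once
    sds.set(np.zeros((1, 5), np.float32),          # overwrite only the first row
            start=[0, 0], count=[1, 5])
    sds[2, :] = data[0, :]                         # slicing notation also writes
    sds.endaccess()
    sd.end()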
fhs/pyhdf | pyhdf/SD.py | SDS.info | def info(self):
"""Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo
"""
buf = _C.array_int32(_C.H4_MAX_VAR_DIMS)
status, sds_name, rank, data_type, n_attrs = \
_C.SDgetinfo(self._id, buf)
_checkErr('info', status, "cannot execute")
dim_sizes = _array_to_ret(buf, rank)
return sds_name, rank, dim_sizes, data_type, n_attrs | python | def info(self):
"""Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo
"""
buf = _C.array_int32(_C.H4_MAX_VAR_DIMS)
status, sds_name, rank, data_type, n_attrs = \
_C.SDgetinfo(self._id, buf)
_checkErr('info', status, "cannot execute")
dim_sizes = _array_to_ret(buf, rank)
return sds_name, rank, dim_sizes, data_type, n_attrs | [
"def",
"info",
"(",
"self",
")",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"_C",
".",
"H4_MAX_VAR_DIMS",
")",
"status",
",",
"sds_name",
",",
"rank",
",",
"data_type",
",",
"n_attrs",
"=",
"_C",
".",
"SDgetinfo",
"(",
"self",
".",
"_id",
",",
"buf",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"\"cannot execute\"",
")",
"dim_sizes",
"=",
"_array_to_ret",
"(",
"buf",
",",
"rank",
")",
"return",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs"
] | Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo | [
"Retrieves",
"information",
"about",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2102-L2130 | train | 237,376 |
fhs/pyhdf | pyhdf/SD.py | SDS.checkempty | def checkempty(self):
"""Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty
"""
status, emptySDS = _C.SDcheckempty(self._id)
_checkErr('checkempty', status, 'invalid SDS identifier')
return emptySDS | python | def checkempty(self):
"""Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty
"""
status, emptySDS = _C.SDcheckempty(self._id)
_checkErr('checkempty', status, 'invalid SDS identifier')
return emptySDS | [
"def",
"checkempty",
"(",
"self",
")",
":",
"status",
",",
"emptySDS",
"=",
"_C",
".",
"SDcheckempty",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'checkempty'",
",",
"status",
",",
"'invalid SDS identifier'",
")",
"return",
"emptySDS"
] | Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty | [
"Determine",
"whether",
"the",
"dataset",
"is",
"empty",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2132-L2148 | train | 237,377 |
fhs/pyhdf | pyhdf/SD.py | SDS.ref | def ref(self):
"""Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref
"""
sds_ref = _C.SDidtoref(self._id)
_checkErr('idtoref', sds_ref, 'illegal SDS identifier')
return sds_ref | python | def ref(self):
"""Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref
"""
sds_ref = _C.SDidtoref(self._id)
_checkErr('idtoref', sds_ref, 'illegal SDS identifier')
return sds_ref | [
"def",
"ref",
"(",
"self",
")",
":",
"sds_ref",
"=",
"_C",
".",
"SDidtoref",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'idtoref'",
",",
"sds_ref",
",",
"'illegal SDS identifier'",
")",
"return",
"sds_ref"
] | Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref | [
"Get",
"the",
"reference",
"number",
"of",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2150-L2166 | train | 237,378 |
fhs/pyhdf | pyhdf/SD.py | SDS.getcal | def getcal(self):
"""Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal()
"""
status, cal, cal_error, offset, offset_err, data_type = \
_C.SDgetcal(self._id)
_checkErr('getcal', status, 'no calibration record')
return cal, cal_error, offset, offset_err, data_type | python | def getcal(self):
"""Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal()
"""
status, cal, cal_error, offset, offset_err, data_type = \
_C.SDgetcal(self._id)
_checkErr('getcal', status, 'no calibration record')
return cal, cal_error, offset, offset_err, data_type | [
"def",
"getcal",
"(",
"self",
")",
":",
"status",
",",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type",
"=",
"_C",
".",
"SDgetcal",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getcal'",
",",
"status",
",",
"'no calibration record'",
")",
"return",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type"
] | Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal() | [
"Retrieve",
"the",
"SDS",
"calibration",
"coefficients",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2206-L2246 | train | 237,379 |
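A sketch applying the documented formula calval = cal * (orival - offset); the try/except guards the case where no calibration record exists, and the file and dataset names are assumed:

    from pyhdf.SD import SD, SDC
    from pyhdf.error import HDF4Error

    sd = SD('sample.hdf', SDC.READ)        # assumed file name
    sds = sd.select('Temperature')         # assumed dataset name
    try:
        cal, cal_err, offset, offset_err, cal_type = sds.getcal()
        calibrated = cal * (sds.get() - offset)    # documented formula
    except HDF4Error:
        print('no calibration record on this dataset')
    sds.endaccess()
    sd.end()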
fhs/pyhdf | pyhdf/SD.py | SDS.getdatastrs | def getdatastrs(self):
"""Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs
"""
status, label, unit, format, coord_system = \
_C.SDgetdatastrs(self._id, 128)
_checkErr('getdatastrs', status, 'cannot execute')
return label, unit, format, coord_system | python | def getdatastrs(self):
"""Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs
"""
status, label, unit, format, coord_system = \
_C.SDgetdatastrs(self._id, 128)
_checkErr('getdatastrs', status, 'cannot execute')
return label, unit, format, coord_system | [
"def",
"getdatastrs",
"(",
"self",
")",
":",
"status",
",",
"label",
",",
"unit",
",",
"format",
",",
"coord_system",
"=",
"_C",
".",
"SDgetdatastrs",
"(",
"self",
".",
"_id",
",",
"128",
")",
"_checkErr",
"(",
"'getdatastrs'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"label",
",",
"unit",
",",
"format",
",",
"coord_system"
] | Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs | [
"Retrieve",
"the",
"dataset",
"standard",
"string",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2248-L2276 | train | 237,380 |
fhs/pyhdf | pyhdf/SD.py | SDS.getrange | def getrange(self):
"""Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = \
self.info()
except HDF4Error:
raise HDF4Error('getrange : invalid SDS identifier')
n_values = 1
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("getrange: SDS has an illegal or " \
"unsupported type %d" % data)
# Note: The C routine returns the max in buf1 and the min
# in buf2. We swap the values returned by the Python
# interface, since it is more natural to return
# min first, then max.
status = _C.SDgetrange(self._id, buf1, buf2)
_checkErr('getrange', status, 'range not set')
return convert(buf2, n_values), convert(buf1, n_values) | python | def getrange(self):
"""Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = \
self.info()
except HDF4Error:
raise HDF4Error('getrange : invalid SDS identifier')
n_values = 1
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("getrange: SDS has an illegal or " \
"unsupported type %d" % data)
# Note: The C routine returns the max in buf1 and the min
# in buf2. We swap the values returned by the Python
# interface, since it is more natural to return
# min first, then max.
status = _C.SDgetrange(self._id, buf1, buf2)
_checkErr('getrange', status, 'range not set')
return convert(buf2, n_values), convert(buf1, n_values) | [
"def",
"getrange",
"(",
"self",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'getrange : invalid SDS identifier'",
")",
"n_values",
"=",
"1",
"convert",
"=",
"_array_to_ret",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"convert",
"=",
"_array_to_str",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf1",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf1",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf1",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf1",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf1",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf1",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf1",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"getrange: SDS has an illegal or \"",
"\"unsupported type %d\"",
"%",
"data",
")",
"# Note: The C routine returns the max in buf1 and the min",
"# in buf2. We swap the values returned by the Python",
"# interface, since it is more natural to return",
"# min first, then max.",
"status",
"=",
"_C",
".",
"SDgetrange",
"(",
"self",
".",
"_id",
",",
"buf1",
",",
"buf2",
")",
"_checkErr",
"(",
"'getrange'",
",",
"status",
",",
"'range not set'",
")",
"return",
"convert",
"(",
"buf2",
",",
"n_values",
")",
",",
"convert",
"(",
"buf1",
",",
"n_values",
")"
] | Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange | [
"Retrieve",
"the",
"dataset",
"min",
"and",
"max",
"values",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2344-L2426 | train | 237,381 |
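Since getrange raises when valid_range was never stored, a guarded sketch (file name assumed):

    from pyhdf.SD import SD, SDC
    from pyhdf.error import HDF4Error

    sd = SD('sample.hdf', SDC.READ)   # assumed file name
    sds = sd.select(0)
    try:
        vmin, vmax = sds.getrange()   # stored values, never computed from data
        print('valid range:', vmin, vmax)
    except HDF4Error:
        print('valid_range not set')
    sds.endaccess()
    sd.end()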
fhs/pyhdf | pyhdf/SD.py | SDS.setcal | def setcal(self, cal, cal_error, offset, offset_err, data_type):
"""Set the dataset calibration coefficients.
Args::
cal the calibration factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal
"""
status = _C.SDsetcal(self._id, cal, cal_error,
offset, offset_err, data_type)
_checkErr('setcal', status, 'cannot execute') | python | def setcal(self, cal, cal_error, offset, offset_err, data_type):
"""Set the dataset calibration coefficients.
Args::
cal the calibration factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal
"""
status = _C.SDsetcal(self._id, cal, cal_error,
offset, offset_err, data_type)
_checkErr('setcal', status, 'cannot execute') | [
"def",
"setcal",
"(",
"self",
",",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type",
")",
":",
"status",
"=",
"_C",
".",
"SDsetcal",
"(",
"self",
".",
"_id",
",",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type",
")",
"_checkErr",
"(",
"'setcal'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset calibration coefficients.
Args::
cal the calibration factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal | [
"Set",
"the",
"dataset",
"calibration",
"coefficients",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2428-L2463 | train | 237,382 |
fhs/pyhdf | pyhdf/SD.py | SDS.setdatastrs | def setdatastrs(self, label, unit, format, coord_sys):
"""Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs
"""
status = _C.SDsetdatastrs(self._id, label, unit, format, coord_sys)
_checkErr('setdatastrs', status, 'cannot execute') | python | def setdatastrs(self, label, unit, format, coord_sys):
"""Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs
"""
status = _C.SDsetdatastrs(self._id, label, unit, format, coord_sys)
_checkErr('setdatastrs', status, 'cannot execute') | [
"def",
"setdatastrs",
"(",
"self",
",",
"label",
",",
"unit",
",",
"format",
",",
"coord_sys",
")",
":",
"status",
"=",
"_C",
".",
"SDsetdatastrs",
"(",
"self",
".",
"_id",
",",
"label",
",",
"unit",
",",
"format",
",",
"coord_sys",
")",
"_checkErr",
"(",
"'setdatastrs'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs | [
"Set",
"the",
"dataset",
"standard",
"string",
"type",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2465-L2490 | train | 237,383 |
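A sketch combining setcal and setdatastrs on a freshly created dataset; all names and coefficient values are illustrative:

    from pyhdf.SD import SD, SDC

    sd = SD('out.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.INT16, (3, 5))       # hypothetical raw counts
    sds.setcal(0.01, 0.0, 0.0, 0.0, SDC.FLOAT32)     # calval = 0.01 * (orival - 0)
    sds.setdatastrs('temperature', 'celsius', '%.2f', 'cartesian')
    sds.endaccess()
    sd.end()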
fhs/pyhdf | pyhdf/SD.py | SDS.setfillvalue | def setfillvalue(self, fill_val):
"""Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setfillvalue : cannot execute')
n_values = 1 # Fill value stands for 1 value.
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setfillvalue: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf[0] = fill_val
status = _C.SDsetfillvalue(self._id, buf)
_checkErr('setfillvalue', status, 'cannot execute') | python | def setfillvalue(self, fill_val):
"""Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setfillvalue : cannot execute')
n_values = 1 # Fill value stands for 1 value.
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setfillvalue: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf[0] = fill_val
status = _C.SDsetfillvalue(self._id, buf)
_checkErr('setfillvalue', status, 'cannot execute') | [
"def",
"setfillvalue",
"(",
"self",
",",
"fill_val",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'setfillvalue : cannot execute'",
")",
"n_values",
"=",
"1",
"# Fill value stands for 1 value.",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"setfillvalue: SDS has an illegal or \"",
"\"unsupported type %d\"",
"%",
"data_type",
")",
"buf",
"[",
"0",
"]",
"=",
"fill_val",
"status",
"=",
"_C",
".",
"SDsetfillvalue",
"(",
"self",
".",
"_id",
",",
"buf",
")",
"_checkErr",
"(",
"'setfillvalue'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue | [
"Set",
"the",
"dataset",
"fill",
"value",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2492-L2552 | train | 237,384 |
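A short sketch for setfillvalue(), under the same illustrative file and dataset names; the fill value itself is arbitrary:

    from pyhdf.SD import SD, SDC

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (4, 5))
    sds.setfillvalue(-999.0)      # stored as attribute '_FillValue'
    print(sds.getfillvalue())     # -999.0
    sds.endaccess()
    sd.end()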
fhs/pyhdf | pyhdf/SD.py | SDS.setrange | def setrange(self, min, max):
"""Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setrange : cannot execute')
n_values = 1
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("SDsetrange: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf1[0] = max
buf2[0] = min
status = _C.SDsetrange(self._id, buf1, buf2)
_checkErr('setrange', status, 'cannot execute') | python | def setrange(self, min, max):
"""Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setrange : cannot execute')
n_values = 1
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("SDsetrange: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf1[0] = max
buf2[0] = min
status = _C.SDsetrange(self._id, buf1, buf2)
_checkErr('setrange', status, 'cannot execute') | [
"def",
"setrange",
"(",
"self",
",",
"min",
",",
"max",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'setrange : cannot execute'",
")",
"n_values",
"=",
"1",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf1",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf1",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf1",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf1",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf1",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf1",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf1",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"SDsetrange: SDS has an illegal or \"",
"\"unsupported type %d\"",
"%",
"data_type",
")",
"buf1",
"[",
"0",
"]",
"=",
"max",
"buf2",
"[",
"0",
"]",
"=",
"min",
"status",
"=",
"_C",
".",
"SDsetrange",
"(",
"self",
".",
"_id",
",",
"buf1",
",",
"buf2",
")",
"_checkErr",
"(",
"'setrange'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange | [
"Set",
"the",
"dataset",
"min",
"and",
"max",
"values",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2555-L2629 | train | 237,385 |
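A sketch for setrange(), assuming the same FLOAT32 dataset as in the previous sketches; getrange() reads the values back:

    from pyhdf.SD import SD, SDC

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (4, 5))
    sds.setrange(-40.0, 40.0)     # stored as attribute 'valid_range' = [min, max]
    print(sds.getrange())         # (-40.0, 40.0)
    sds.endaccess()
    sd.end()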
fhs/pyhdf | pyhdf/SD.py | SDS.getcompress | def getcompress(self):
"""Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if the dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress
"""
status, comp_type, value, v2, v3, v4, v5 = _C._SDgetcompress(self._id)
_checkErr('getcompress', status, 'no compression')
if comp_type == SDC.COMP_NONE:
return (comp_type,)
elif comp_type == SDC.COMP_SZIP:
return comp_type, value, v2, v3, v4, v5
else:
return comp_type, value | python | def getcompress(self):
"""Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if the dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress
"""
status, comp_type, value, v2, v3, v4, v5 = _C._SDgetcompress(self._id)
_checkErr('getcompress', status, 'no compression')
if comp_type == SDC.COMP_NONE:
return (comp_type,)
elif comp_type == SDC.COMP_SZIP:
return comp_type, value, v2, v3, v4, v5
else:
return comp_type, value | [
"def",
"getcompress",
"(",
"self",
")",
":",
"status",
",",
"comp_type",
",",
"value",
",",
"v2",
",",
"v3",
",",
"v4",
",",
"v5",
"=",
"_C",
".",
"_SDgetcompress",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getcompress'",
",",
"status",
",",
"'no compression'",
")",
"if",
"comp_type",
"==",
"SDC",
".",
"COMP_NONE",
":",
"return",
"(",
"comp_type",
",",
")",
"elif",
"comp_type",
"==",
"SDC",
".",
"COMP_SZIP",
":",
"return",
"comp_type",
",",
"value",
",",
"v2",
",",
"v3",
",",
"v4",
",",
"v5",
"else",
":",
"return",
"comp_type",
",",
"value"
] | Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if the dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress | [
"Retrieves",
"info",
"about",
"dataset",
"compression",
"type",
"and",
"mode",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2631-L2677 | train | 237,386 |
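A sketch for getcompress(), assuming 'demo.hdf' already holds a compressed dataset named 'temp' (both names are assumptions); an uncompressed dataset raises HDF4Error:

    from pyhdf.SD import SD, SDC
    from pyhdf.error import HDF4Error

    sds = SD('demo.hdf').select('temp')
    try:
        info = sds.getcompress()
        if info[0] == SDC.COMP_DEFLATE:
            print('gzip, level', info[1])
        elif info[0] == SDC.COMP_SZIP and info[1] & SDC.COMP_SZIP_EC:
            print('SZIP, EC encoding scheme used')
    except HDF4Error:
        print('dataset is not compressed')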
fhs/pyhdf | pyhdf/SD.py | SDS.setcompress | def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute') | python | def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
is not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute') | [
"def",
"setcompress",
"(",
"self",
",",
"comp_type",
",",
"value",
"=",
"0",
",",
"v2",
"=",
"0",
")",
":",
"status",
"=",
"_C",
".",
"_SDsetcompress",
"(",
"self",
".",
"_id",
",",
"comp_type",
",",
"value",
",",
"v2",
")",
"_checkErr",
"(",
"'setcompress'",
",",
"status",
",",
"'cannot execute'",
")"
] | Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress | [
"Compresses",
"the",
"dataset",
"using",
"a",
"specified",
"compression",
"method",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2679-L2718 | train | 237,387 |
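A sketch for setcompress() with gzip; as the record notes, the call precedes any write and the data is written in one shot (file and dataset names assumed):

    from pyhdf.SD import SD, SDC
    import numpy as np

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (100, 100))
    sds.setcompress(SDC.COMP_DEFLATE, 6)        # deflate level 6, before writing
    sds[:, :] = np.zeros((100, 100), np.float32)
    sds.endaccess()
    sd.end()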
fhs/pyhdf | pyhdf/SD.py | SDS.setexternalfile | def setexternalfile(self, filename, offset=0):
"""Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile
"""
status = _C.SDsetexternalfile(self._id, filename, offset)
_checkErr('setexternalfile', status, 'execution error') | python | def setexternalfile(self, filename, offset=0):
"""Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile
"""
status = _C.SDsetexternalfile(self._id, filename, offset)
_checkErr('setexternalfile', status, 'execution error') | [
"def",
"setexternalfile",
"(",
"self",
",",
"filename",
",",
"offset",
"=",
"0",
")",
":",
"status",
"=",
"_C",
".",
"SDsetexternalfile",
"(",
"self",
".",
"_id",
",",
"filename",
",",
"offset",
")",
"_checkErr",
"(",
"'setexternalfile'",
",",
"status",
",",
"'execution error'",
")"
] | Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile | [
"Store",
"the",
"dataset",
"data",
"in",
"an",
"external",
"file",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2721-L2738 | train | 237,388 |
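A sketch for setexternalfile(); the external file name is an assumption, and the call is made before the data is written so the values land in the external file:

    from pyhdf.SD import SD, SDC
    import numpy as np

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (4, 5))
    sds.setexternalfile('temp_raw.dat')         # assumed external file, offset 0
    sds[:, :] = np.ones((4, 5), np.float32)
    sds.endaccess()
    sd.end()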
fhs/pyhdf | pyhdf/SD.py | SDS.dimensions | def dimensions(self, full=0):
"""Return a dictionnary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent
"""
# Get the number of dimensions and their lengths.
nDims, dimLen = self.info()[1:3]
if isinstance(dimLen, int): # need a sequence
dimLen = [dimLen]
# Check if the dataset is appendable.
unlim = self.isrecord()
# Inquire each dimension
res = {}
for n in range(nDims):
d = self.dim(n)
# The length reported by info() is 0 for an unlimited dimension.
# Rather use the lengths reported by SDS.info()
name, k, scaleType, nAtt = d.info()
length = dimLen[n]
if full:
res[name] = (length, n, unlim and n == 0,
scaleType, nAtt)
else:
res[name] = length
return res | python | def dimensions(self, full=0):
"""Return a dictionnary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent
"""
# Get the number of dimensions and their lengths.
nDims, dimLen = self.info()[1:3]
if isinstance(dimLen, int): # need a sequence
dimLen = [dimLen]
# Check if the dataset is appendable.
unlim = self.isrecord()
# Inquire each dimension
res = {}
for n in range(nDims):
d = self.dim(n)
# The length reported by info() is 0 for an unlimited dimension.
# Rather use the lengths reported by SDS.info()
name, k, scaleType, nAtt = d.info()
length = dimLen[n]
if full:
res[name] = (length, n, unlim and n == 0,
scaleType, nAtt)
else:
res[name] = length
return res | [
"def",
"dimensions",
"(",
"self",
",",
"full",
"=",
"0",
")",
":",
"# Get the number of dimensions and their lengths.",
"nDims",
",",
"dimLen",
"=",
"self",
".",
"info",
"(",
")",
"[",
"1",
":",
"3",
"]",
"if",
"isinstance",
"(",
"dimLen",
",",
"int",
")",
":",
"# need a sequence",
"dimLen",
"=",
"[",
"dimLen",
"]",
"# Check if the dataset is appendable.",
"unlim",
"=",
"self",
".",
"isrecord",
"(",
")",
"# Inquire each dimension",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"nDims",
")",
":",
"d",
"=",
"self",
".",
"dim",
"(",
"n",
")",
"# The length reported by info() is 0 for an unlimited dimension.",
"# Rather use the lengths reported by SDS.info()",
"name",
",",
"k",
",",
"scaleType",
",",
"nAtt",
"=",
"d",
".",
"info",
"(",
")",
"length",
"=",
"dimLen",
"[",
"n",
"]",
"if",
"full",
":",
"res",
"[",
"name",
"]",
"=",
"(",
"length",
",",
"n",
",",
"unlim",
"and",
"n",
"==",
"0",
",",
"scaleType",
",",
"nAtt",
")",
"else",
":",
"res",
"[",
"name",
"]",
"=",
"length",
"return",
"res"
] | Return a dictionary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"every",
"dataset",
"dimension",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2800-L2849 | train | 237,389 |
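A sketch for dimensions(), assuming an existing dataset 'temp' in 'demo.hdf':

    from pyhdf.SD import SD

    sds = SD('demo.hdf').select('temp')
    for name, length in sds.dimensions().items():
        print(name, length)
    # full=1 maps each name to (length, index, unlimited, scale_type, n_attrs)
    print(sds.dimensions(full=1))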
fhs/pyhdf | pyhdf/SD.py | SDim.info | def info(self):
"""Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
"""
status, dim_name, dim_size, data_type, n_attrs = \
_C.SDdiminfo(self._id)
_checkErr('info', status, 'cannot execute')
return dim_name, dim_size, data_type, n_attrs | python | def info(self):
"""Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
"""
status, dim_name, dim_size, data_type, n_attrs = \
_C.SDdiminfo(self._id)
_checkErr('info', status, 'cannot execute')
return dim_name, dim_size, data_type, n_attrs | [
"def",
"info",
"(",
"self",
")",
":",
"status",
",",
"dim_name",
",",
"dim_size",
",",
"data_type",
",",
"n_attrs",
"=",
"_C",
".",
"SDdiminfo",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"dim_name",
",",
"dim_size",
",",
"data_type",
",",
"n_attrs"
] | Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo | [
"Return",
"info",
"about",
"the",
"dimension",
"instance",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2892-L2918 | train | 237,390 |
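A sketch for SDim.info(), under the same assumed file and dataset names:

    from pyhdf.SD import SD

    sds = SD('demo.hdf').select('temp')
    dim = sds.dim(0)                 # first dimension of the dataset
    name, length, scale_type, n_attrs = dim.info()
    print(name, length, scale_type, n_attrs)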
fhs/pyhdf | pyhdf/SD.py | SDim.setname | def setname(self, dim_name):
"""Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name makes the dimensions "shared"; in order to be
shared, the dimensions must be defined similarly.
Returns::
None
C library equivalent : SDsetdimname
"""
status = _C.SDsetdimname(self._id, dim_name)
_checkErr('setname', status, 'cannot execute') | python | def setname(self, dim_name):
"""Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name makes the dimensions "shared"; in order to be
shared, the dimensions must be defined similarly.
Returns::
None
C library equivalent : SDsetdimname
"""
status = _C.SDsetdimname(self._id, dim_name)
_checkErr('setname', status, 'cannot execute') | [
"def",
"setname",
"(",
"self",
",",
"dim_name",
")",
":",
"status",
"=",
"_C",
".",
"SDsetdimname",
"(",
"self",
".",
"_id",
",",
"dim_name",
")",
"_checkErr",
"(",
"'setname'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name makes the dimensions "shared"; in order to be
shared, the dimensions must be defined similarly.
Returns::
None
C library equivalent : SDsetdimname | [
"Set",
"the",
"dimension",
"name",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2939-L2956 | train | 237,391 |
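A sketch for setname(); the dimension names 'y' and 'x' are illustrative:

    from pyhdf.SD import SD, SDC

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (4, 5))
    sds.dim(0).setname('y')     # two dimensions given the same name are shared
    sds.dim(1).setname('x')
    sds.endaccess()
    sd.end()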
fhs/pyhdf | pyhdf/SD.py | SDim.getscale | def getscale(self):
"""Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale
"""
# Get dimension info. If data_type is 0, no scale has been set
# on the dimension.
status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id)
_checkErr('getscale', status, 'cannot execute')
if data_type == 0:
raise HDF4Error("no scale set on that dimension")
# dim_size is 0 for an unlimited dimension. The actual length is
# obtained through SDgetinfo.
if dim_size == 0:
dim_size = self._sds.info()[2][self._index]
# Get scale values.
if data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(dim_size)
elif data_type == SDC.INT8:
buf = _C.array_int8(dim_size)
elif data_type == SDC.INT16:
buf = _C.array_int16(dim_size)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(dim_size)
elif data_type == SDC.INT32:
buf = _C.array_int32(dim_size)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(dim_size)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(dim_size)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(dim_size)
else:
raise HDF4Error("getscale: dimension has an "\
"illegal or unsupported type %d" % data_type)
status = _C.SDgetdimscale(self._id, buf)
_checkErr('getscale', status, 'cannot execute')
return _array_to_ret(buf, dim_size) | python | def getscale(self):
"""Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale
"""
# Get dimension info. If data_type is 0, no scale has been set
# on the dimension.
status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id)
_checkErr('getscale', status, 'cannot execute')
if data_type == 0:
raise HDF4Error("no scale set on that dimension")
# dim_size is 0 for an unlimited dimension. The actual length is
# obtained through SDgetinfo.
if dim_size == 0:
dim_size = self._sds.info()[2][self._index]
# Get scale values.
if data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(dim_size)
elif data_type == SDC.INT8:
buf = _C.array_int8(dim_size)
elif data_type == SDC.INT16:
buf = _C.array_int16(dim_size)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(dim_size)
elif data_type == SDC.INT32:
buf = _C.array_int32(dim_size)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(dim_size)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(dim_size)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(dim_size)
else:
raise HDF4Error("getscale: dimension has an "\
"illegal or unsupported type %d" % data_type)
status = _C.SDgetdimscale(self._id, buf)
_checkErr('getscale', status, 'cannot execute')
return _array_to_ret(buf, dim_size) | [
"def",
"getscale",
"(",
"self",
")",
":",
"# Get dimension info. If data_type is 0, no scale have been set",
"# on the dimension.",
"status",
",",
"dim_name",
",",
"dim_size",
",",
"data_type",
",",
"n_attrs",
"=",
"_C",
".",
"SDdiminfo",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getscale'",
",",
"status",
",",
"'cannot execute'",
")",
"if",
"data_type",
"==",
"0",
":",
"raise",
"HDF4Error",
"(",
"\"no scale set on that dimension\"",
")",
"# dim_size is 0 for an unlimited dimension. The actual length is",
"# obtained through SDgetinfo.",
"if",
"dim_size",
"==",
"0",
":",
"dim_size",
"=",
"self",
".",
"_sds",
".",
"info",
"(",
")",
"[",
"2",
"]",
"[",
"self",
".",
"_index",
"]",
"# Get scale values.",
"if",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"dim_size",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"getscale: dimension has an \"",
"\"illegal or unsupported type %d\"",
"%",
"data_type",
")",
"status",
"=",
"_C",
".",
"SDgetdimscale",
"(",
"self",
".",
"_id",
",",
"buf",
")",
"_checkErr",
"(",
"'getscale'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"_array_to_ret",
"(",
"buf",
",",
"dim_size",
")"
] | Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale | [
"Obtain",
"the",
"scale",
"values",
"along",
"a",
"dimension",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2959-L3018 | train | 237,392 |
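A sketch for getscale(), assuming 'temp' exists in 'demo.hdf'; the HDF4Error branch covers a dimension with no scale set:

    from pyhdf.SD import SD
    from pyhdf.error import HDF4Error

    sds = SD('demo.hdf').select('temp')
    try:
        print(sds.dim(0).getscale())   # one value per dimension element
    except HDF4Error:
        print('no scale set on this dimension')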
fhs/pyhdf | pyhdf/SD.py | SDim.setscale | def setscale(self, data_type, scale):
"""Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance.
"""
try:
n_values = len(scale)
except:
n_values = 1
# Validate args
info = self._sds.info()
if info[1] == 1:
dim_size = info[2]
else:
dim_size = info[2][self._index]
if n_values != dim_size:
raise HDF4Error('number of scale values (%d) does not match ' \
'dimension size (%d)' % (n_values, dim_size))
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow a string as the scale argument.
# Becomes a noop if already a list.
scale = list(scale)
for n in range(n_values):
scale[n] = ord(scale[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setscale: illegal or usupported data_type")
if n_values == 1:
buf[0] = scale
else:
for n in range(n_values):
buf[n] = scale[n]
status = _C.SDsetdimscale(self._id, n_values, data_type, buf)
_checkErr('setscale', status, 'cannot execute') | python | def setscale(self, data_type, scale):
"""Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance.
"""
try:
n_values = len(scale)
except:
n_values = 1
# Validate args
info = self._sds.info()
if info[1] == 1:
dim_size = info[2]
else:
dim_size = info[2][self._index]
if n_values != dim_size:
raise HDF4Error('number of scale values (%d) does not match ' \
'dimension size (%d)' % (n_values, dim_size))
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow a string as the scale argument.
# Becomes a noop if already a list.
scale = list(scale)
for n in range(n_values):
scale[n] = ord(scale[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setscale: illegal or usupported data_type")
if n_values == 1:
buf[0] = scale
else:
for n in range(n_values):
buf[n] = scale[n]
status = _C.SDsetdimscale(self._id, n_values, data_type, buf)
_checkErr('setscale', status, 'cannot execute') | [
"def",
"setscale",
"(",
"self",
",",
"data_type",
",",
"scale",
")",
":",
"try",
":",
"n_values",
"=",
"len",
"(",
"scale",
")",
"except",
":",
"n_values",
"=",
"1",
"# Validate args",
"info",
"=",
"self",
".",
"_sds",
".",
"info",
"(",
")",
"if",
"info",
"[",
"1",
"]",
"==",
"1",
":",
"dim_size",
"=",
"info",
"[",
"2",
"]",
"else",
":",
"dim_size",
"=",
"info",
"[",
"2",
"]",
"[",
"self",
".",
"_index",
"]",
"if",
"n_values",
"!=",
"dim_size",
":",
"raise",
"HDF4Error",
"(",
"'number of scale values (%d) does not match '",
"'dimension size (%d)'",
"%",
"(",
"n_values",
",",
"dim_size",
")",
")",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"# Allow a string as the scale argument.",
"# Becomes a noop if already a list.",
"scale",
"=",
"list",
"(",
"scale",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"scale",
"[",
"n",
"]",
"=",
"ord",
"(",
"scale",
"[",
"n",
"]",
")",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"setscale: illegal or usupported data_type\"",
")",
"if",
"n_values",
"==",
"1",
":",
"buf",
"[",
"0",
"]",
"=",
"scale",
"else",
":",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"buf",
"[",
"n",
"]",
"=",
"scale",
"[",
"n",
"]",
"status",
"=",
"_C",
".",
"SDsetdimscale",
"(",
"self",
".",
"_id",
",",
"n_values",
",",
"data_type",
",",
"buf",
")",
"_checkErr",
"(",
"'setscale'",
",",
"status",
",",
"'cannot execute'",
")"
] | Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance. | [
"Initialize",
"the",
"scale",
"values",
"along",
"the",
"dimension",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L3020-L3098 | train | 237,393 |
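A sketch for setscale(); the scale list length matches the dimension length (4 here), and the call creates the "coordinate variable" described in the record above:

    from pyhdf.SD import SD, SDC

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (4, 5))
    dim = sds.dim(0)
    dim.setname('y')                                    # assumed dimension name
    dim.setscale(SDC.FLOAT64, [0.0, 10.0, 20.0, 30.0])  # one value per element
    sds.endaccess()
    sd.end()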
fhs/pyhdf | pyhdf/SD.py | SDim.getstrs | def getstrs(self):
"""Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs
"""
status, label, unit, format = _C.SDgetdimstrs(self._id, 128)
_checkErr('getstrs', status, 'cannot execute')
return label, unit, format | python | def getstrs(self):
"""Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs
"""
status, label, unit, format = _C.SDgetdimstrs(self._id, 128)
_checkErr('getstrs', status, 'cannot execute')
return label, unit, format | [
"def",
"getstrs",
"(",
"self",
")",
":",
"status",
",",
"label",
",",
"unit",
",",
"format",
"=",
"_C",
".",
"SDgetdimstrs",
"(",
"self",
".",
"_id",
",",
"128",
")",
"_checkErr",
"(",
"'getstrs'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"label",
",",
"unit",
",",
"format"
] | Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs | [
"Retrieve",
"the",
"dimension",
"standard",
"string",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L3100-L3122 | train | 237,394 |
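A sketch for SDim.getstrs(), mirroring the getscale() pattern; an HDF4Error signals that the standard strings were never set:

    from pyhdf.SD import SD
    from pyhdf.error import HDF4Error

    dim = SD('demo.hdf').select('temp').dim(0)
    try:
        label, unit, fmt = dim.getstrs()
        print(label, unit, fmt)
    except HDF4Error:
        print('standard dimension strings not set')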
fhs/pyhdf | pyhdf/SD.py | SDim.setstrs | def setstrs(self, label, unit, format):
"""Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs
"""
status = _C.SDsetdimstrs(self._id, label, unit, format)
_checkErr('setstrs', status, 'cannot execute') | python | def setstrs(self, label, unit, format):
"""Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs
"""
status = _C.SDsetdimstrs(self._id, label, unit, format)
_checkErr('setstrs', status, 'cannot execute') | [
"def",
"setstrs",
"(",
"self",
",",
"label",
",",
"unit",
",",
"format",
")",
":",
"status",
"=",
"_C",
".",
"SDsetdimstrs",
"(",
"self",
".",
"_id",
",",
"label",
",",
"unit",
",",
"format",
")",
"_checkErr",
"(",
"'setstrs'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs | [
"Set",
"the",
"dimension",
"standard",
"string",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L3124-L3141 | train | 237,395 |
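A sketch for SDim.setstrs(); the label, unit and format values are illustrative:

    from pyhdf.SD import SD, SDC

    sd = SD('demo.hdf', SDC.WRITE | SDC.CREATE)
    sds = sd.create('temp', SDC.FLOAT32, (4, 5))
    sds.dim(0).setstrs('distance', 'm', '%.1f')   # long_name, units, format
    sds.endaccess()
    sd.end()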
fhs/pyhdf | pyhdf/VS.py | VS.attach | def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdata names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
"""
mode = write and 'w' or 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd) | python | def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdata names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
"""
mode = write and 'w' or 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd) | [
"def",
"attach",
"(",
"self",
",",
"num_name",
",",
"write",
"=",
"0",
")",
":",
"mode",
"=",
"write",
"and",
"'w'",
"or",
"'r'",
"if",
"isinstance",
"(",
"num_name",
",",
"str",
")",
":",
"num",
"=",
"self",
".",
"find",
"(",
"num_name",
")",
"else",
":",
"num",
"=",
"num_name",
"vd",
"=",
"_C",
".",
"VSattach",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"num",
",",
"mode",
")",
"if",
"vd",
"<",
"0",
":",
"_checkErr",
"(",
"'attach'",
",",
"vd",
",",
"'cannot attach vdata'",
")",
"return",
"VD",
"(",
"self",
",",
"vd",
")"
] | Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdata names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance. | [
"Locate",
"an",
"existing",
"vdata",
"or",
"create",
"a",
"new",
"vdata",
"in",
"the",
"HDF",
"file",
"returning",
"a",
"VD",
"instance",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L872-L911 | train | 237,396 |
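A sketch for VS.attach(), assuming 'inventory.hdf' already holds a vdata named 'INVENTORY' (both names are assumptions):

    from pyhdf.HDF import HDF
    from pyhdf.VS import VS      # imported alongside HDF, per the usual pyhdf examples

    hdf = HDF('inventory.hdf')          # read-only by default
    vs = hdf.vstart()
    vd = vs.attach('INVENTORY')         # by name; a reference number also works
    print(vd._nrecs, 'records')
    vd.detach()
    vs.end()
    hdf.close()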
fhs/pyhdf | pyhdf/VS.py | VS.create | def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg) | python | def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg) | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"fields",
")",
":",
"try",
":",
"# Create new vdata (-1), open in write mode (1)",
"vd",
"=",
"self",
".",
"attach",
"(",
"-",
"1",
",",
"1",
")",
"# Set vdata name",
"vd",
".",
"_name",
"=",
"name",
"# Define fields",
"allNames",
"=",
"[",
"]",
"for",
"name",
",",
"type",
",",
"order",
"in",
"fields",
":",
"vd",
".",
"fdefine",
"(",
"name",
",",
"type",
",",
"order",
")",
"allNames",
".",
"append",
"(",
"name",
")",
"# Allocate fields to the vdata",
"vd",
".",
"setfields",
"(",
"*",
"allNames",
")",
"return",
"vd",
"except",
"HDF4Error",
"as",
"msg",
":",
"raise",
"HDF4Error",
"(",
"\"error creating vdata (%s)\"",
"%",
"msg",
")"
] | Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent | [
"Create",
"a",
"new",
"vdata",
"setting",
"its",
"name",
"and",
"allocating",
"its",
"fields",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L913-L959 | train | 237,397 |
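A sketch for VS.create(); the vdata name, field layout and record values are illustrative, and each record lists one value (or string) per field, in field order:

    from pyhdf.HDF import HDF, HC
    from pyhdf.VS import VS

    hdf = HDF('inventory.hdf', HC.WRITE | HC.CREATE)
    vs = hdf.vstart()
    vd = vs.create('INVENTORY',                 # assumed vdata name
                   (('partid', HC.CHAR8, 5),    # 5-character string field
                    ('qty',    HC.INT32, 1)))   # single integer field
    vd.write([['Q1234', 12],
              ['R5678', 3]])                    # two records
    vd.detach()
    vs.end()
    hdf.close()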
fhs/pyhdf | pyhdf/VS.py | VS.next | def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num | python | def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num | [
"def",
"next",
"(",
"self",
",",
"vRef",
")",
":",
"num",
"=",
"_C",
".",
"VSgetid",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"vRef",
")",
"_checkErr",
"(",
"'next'",
",",
"num",
",",
"'cannot get next vdata'",
")",
"return",
"num"
] | Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid | [
"Get",
"the",
"reference",
"number",
"of",
"the",
"vdata",
"following",
"a",
"given",
"vdata",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L984-L1008 | train | 237,398 |
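A sketch for VS.next(), scanning every vdata in an assumed 'inventory.hdf'; the loop mirrors the one used internally by vdatainfo() below:

    from pyhdf.HDF import HDF
    from pyhdf.VS import VS
    from pyhdf.error import HDF4Error

    vs = HDF('inventory.hdf').vstart()
    ref = -1                         # -1 starts the scan at the first vdata
    while True:
        try:
            ref = vs.next(ref)
        except HDF4Error:            # no vdata left
            break
        vd = vs.attach(ref)
        print(ref, vd._name)
        vd.detach()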
fhs/pyhdf | pyhdf/VS.py | VS.vdatainfo | def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst | python | def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst | [
"def",
"vdatainfo",
"(",
"self",
",",
"listAttr",
"=",
"0",
")",
":",
"lst",
"=",
"[",
"]",
"ref",
"=",
"-",
"1",
"# start at beginning",
"while",
"True",
":",
"try",
":",
"nxtRef",
"=",
"self",
".",
"next",
"(",
"ref",
")",
"except",
"HDF4Error",
":",
"# no vdata left",
"break",
"# Attach the vdata and check for an \"attribute\" vdata.",
"ref",
"=",
"nxtRef",
"vdObj",
"=",
"self",
".",
"attach",
"(",
"ref",
")",
"if",
"listAttr",
"or",
"not",
"vdObj",
".",
"_isattr",
":",
"# Append a list of vdata properties.",
"lst",
".",
"append",
"(",
"(",
"vdObj",
".",
"_name",
",",
"vdObj",
".",
"_class",
",",
"vdObj",
".",
"_refnum",
",",
"vdObj",
".",
"_nrecs",
",",
"vdObj",
".",
"_nfields",
",",
"vdObj",
".",
"_nattrs",
",",
"vdObj",
".",
"_recsize",
",",
"vdObj",
".",
"_tag",
",",
"vdObj",
".",
"_interlace",
")",
")",
"vdObj",
".",
"detach",
"(",
")",
"return",
"lst"
] | Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent | [
"Return",
"info",
"about",
"all",
"the",
"file",
"vdatas",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1010-L1060 | train | 237,399 |
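A sketch for vdatainfo(), unpacking the 9-element tuples it returns (file name assumed as before):

    from pyhdf.HDF import HDF
    from pyhdf.VS import VS

    vs = HDF('inventory.hdf').vstart()
    for (name, cls, ref, nrecs, nfields,
         nattrs, recsize, tag, interlace) in vs.vdatainfo():
        print(name, ref, nrecs, 'records of', recsize, 'bytes')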