repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
kurtbrose/pyjks | jks/jks.py | PrivateKeyEntry.encrypt | def encrypt(self, key_password):
"""
Encrypts the private key, so that it can be saved to a keystore.
This will make it necessary to decrypt it again if it is going to be used later.
Has no effect if the entry is already encrypted.
:param str key_password: The password to encrypt the entry with.
"""
if not self.is_decrypted():
return
encrypted_private_key = sun_crypto.jks_pkey_encrypt(self.pkey_pkcs8, key_password)
a = AlgorithmIdentifier()
a.setComponentByName('algorithm', sun_crypto.SUN_JKS_ALGO_ID)
a.setComponentByName('parameters', '\x05\x00')
epki = rfc5208.EncryptedPrivateKeyInfo()
epki.setComponentByName('encryptionAlgorithm',a)
epki.setComponentByName('encryptedData', encrypted_private_key)
self._encrypted = encoder.encode(epki)
self._pkey = None
self._pkey_pkcs8 = None
self._algorithm_oid = None | python | def encrypt(self, key_password):
"""
Encrypts the private key, so that it can be saved to a keystore.
This will make it necessary to decrypt it again if it is going to be used later.
Has no effect if the entry is already encrypted.
:param str key_password: The password to encrypt the entry with.
"""
if not self.is_decrypted():
return
encrypted_private_key = sun_crypto.jks_pkey_encrypt(self.pkey_pkcs8, key_password)
a = AlgorithmIdentifier()
a.setComponentByName('algorithm', sun_crypto.SUN_JKS_ALGO_ID)
a.setComponentByName('parameters', '\x05\x00')
epki = rfc5208.EncryptedPrivateKeyInfo()
epki.setComponentByName('encryptionAlgorithm',a)
epki.setComponentByName('encryptedData', encrypted_private_key)
self._encrypted = encoder.encode(epki)
self._pkey = None
self._pkey_pkcs8 = None
self._algorithm_oid = None | [
"def",
"encrypt",
"(",
"self",
",",
"key_password",
")",
":",
"if",
"not",
"self",
".",
"is_decrypted",
"(",
")",
":",
"return",
"encrypted_private_key",
"=",
"sun_crypto",
".",
"jks_pkey_encrypt",
"(",
"self",
".",
"pkey_pkcs8",
",",
"key_password",
")",
"a... | Encrypts the private key, so that it can be saved to a keystore.
This will make it necessary to decrypt it again if it is going to be used later.
Has no effect if the entry is already encrypted.
:param str key_password: The password to encrypt the entry with. | [
"Encrypts",
"the",
"private",
"key",
"so",
"that",
"it",
"can",
"be",
"saved",
"to",
"a",
"keystore",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L229-L253 | train | 213,000 |
kurtbrose/pyjks | jks/jks.py | SecretKeyEntry.new | def new(cls, alias, sealed_obj, algorithm, key, key_size):
"""
Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore.
"""
timestamp = int(time.time()) * 1000
raise NotImplementedError("Creating Secret Keys not implemented") | python | def new(cls, alias, sealed_obj, algorithm, key, key_size):
"""
Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore.
"""
timestamp = int(time.time()) * 1000
raise NotImplementedError("Creating Secret Keys not implemented") | [
"def",
"new",
"(",
"cls",
",",
"alias",
",",
"sealed_obj",
",",
"algorithm",
",",
"key",
",",
"key_size",
")",
":",
"timestamp",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"*",
"1000",
"raise",
"NotImplementedError",
"(",
"\"Creating Secret Key... | Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore. | [
"Helper",
"function",
"to",
"create",
"a",
"new",
"SecretKeyEntry",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L269-L278 | train | 213,001 |
kurtbrose/pyjks | jks/jks.py | KeyStore.new | def new(cls, store_type, store_entries):
"""
Helper function to create a new KeyStore.
:param string store_type: What kind of keystore
the store should be. Valid options are jks or jceks.
:param list store_entries: Existing entries that
should be added to the keystore.
:returns: A loaded :class:`KeyStore` instance,
with the specified entries.
:raises DuplicateAliasException: If some of the
entries have the same alias.
:raises UnsupportedKeyStoreTypeException: If the keystore is of
an unsupported type
:raises UnsupportedKeyStoreEntryTypeException: If some
of the keystore entries are unsupported (in this keystore type)
"""
if store_type not in ['jks', 'jceks']:
raise UnsupportedKeystoreTypeException("The Keystore Type '%s' is not supported" % store_type)
entries = {}
for entry in store_entries:
if not isinstance(entry, AbstractKeystoreEntry):
raise UnsupportedKeystoreEntryTypeException("Entries must be a KeyStore Entry")
if store_type != 'jceks' and isinstance(entry, SecretKeyEntry):
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
alias = entry.alias
if alias in entries:
raise DuplicateAliasException("Found duplicate alias '%s'" % alias)
entries[alias] = entry
return cls(store_type, entries) | python | def new(cls, store_type, store_entries):
"""
Helper function to create a new KeyStore.
:param string store_type: What kind of keystore
the store should be. Valid options are jks or jceks.
:param list store_entries: Existing entries that
should be added to the keystore.
:returns: A loaded :class:`KeyStore` instance,
with the specified entries.
:raises DuplicateAliasException: If some of the
entries have the same alias.
:raises UnsupportedKeyStoreTypeException: If the keystore is of
an unsupported type
:raises UnsupportedKeyStoreEntryTypeException: If some
of the keystore entries are unsupported (in this keystore type)
"""
if store_type not in ['jks', 'jceks']:
raise UnsupportedKeystoreTypeException("The Keystore Type '%s' is not supported" % store_type)
entries = {}
for entry in store_entries:
if not isinstance(entry, AbstractKeystoreEntry):
raise UnsupportedKeystoreEntryTypeException("Entries must be a KeyStore Entry")
if store_type != 'jceks' and isinstance(entry, SecretKeyEntry):
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
alias = entry.alias
if alias in entries:
raise DuplicateAliasException("Found duplicate alias '%s'" % alias)
entries[alias] = entry
return cls(store_type, entries) | [
"def",
"new",
"(",
"cls",
",",
"store_type",
",",
"store_entries",
")",
":",
"if",
"store_type",
"not",
"in",
"[",
"'jks'",
",",
"'jceks'",
"]",
":",
"raise",
"UnsupportedKeystoreTypeException",
"(",
"\"The Keystore Type '%s' is not supported\"",
"%",
"store_type",
... | Helper function to create a new KeyStore.
:param string store_type: What kind of keystore
the store should be. Valid options are jks or jceks.
:param list store_entries: Existing entries that
should be added to the keystore.
:returns: A loaded :class:`KeyStore` instance,
with the specified entries.
:raises DuplicateAliasException: If some of the
entries have the same alias.
:raises UnsupportedKeyStoreTypeException: If the keystore is of
an unsupported type
:raises UnsupportedKeyStoreEntryTypeException: If some
of the keystore entries are unsupported (in this keystore type) | [
"Helper",
"function",
"to",
"create",
"a",
"new",
"KeyStore",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L383-L419 | train | 213,002 |
kurtbrose/pyjks | jks/jks.py | KeyStore.saves | def saves(self, store_password):
"""
Saves the keystore so that it can be read by other applications.
If any of the private keys are unencrypted, they will be encrypted
with the same password as the keystore.
:param str store_password: Password for the created keystore
(and for any unencrypted keys)
:returns: A byte string representation of the keystore.
:raises UnsupportedKeystoreTypeException: If the keystore
is of an unsupported type
:raises UnsupportedKeystoreEntryTypeException: If the keystore
contains an unsupported entry type
"""
if self.store_type == 'jks':
keystore = MAGIC_NUMBER_JKS
elif self.store_type == 'jceks':
raise NotImplementedError("Saving of JCEKS keystores is not implemented")
else:
raise UnsupportedKeystoreTypeException("Only JKS and JCEKS keystores are supported")
keystore += b4.pack(2) # version 2
keystore += b4.pack(len(self.entries))
for alias, item in self.entries.items():
if isinstance(item, TrustedCertEntry):
keystore += self._write_trusted_cert(alias, item)
elif isinstance(item, PrivateKeyEntry):
keystore += self._write_private_key(alias, item, store_password)
elif isinstance(item, SecretKeyEntry):
if self.store_type != 'jceks':
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
raise NotImplementedError("Saving of Secret Keys not implemented")
else:
raise UnsupportedKeystoreEntryTypeException("Unknown entry type in keystore")
hash_fn = hashlib.sha1
store_password_utf16 = store_password.encode('utf-16be')
hash = hash_fn(store_password_utf16 + SIGNATURE_WHITENING + keystore).digest()
keystore += hash
return keystore | python | def saves(self, store_password):
"""
Saves the keystore so that it can be read by other applications.
If any of the private keys are unencrypted, they will be encrypted
with the same password as the keystore.
:param str store_password: Password for the created keystore
(and for any unencrypted keys)
:returns: A byte string representation of the keystore.
:raises UnsupportedKeystoreTypeException: If the keystore
is of an unsupported type
:raises UnsupportedKeystoreEntryTypeException: If the keystore
contains an unsupported entry type
"""
if self.store_type == 'jks':
keystore = MAGIC_NUMBER_JKS
elif self.store_type == 'jceks':
raise NotImplementedError("Saving of JCEKS keystores is not implemented")
else:
raise UnsupportedKeystoreTypeException("Only JKS and JCEKS keystores are supported")
keystore += b4.pack(2) # version 2
keystore += b4.pack(len(self.entries))
for alias, item in self.entries.items():
if isinstance(item, TrustedCertEntry):
keystore += self._write_trusted_cert(alias, item)
elif isinstance(item, PrivateKeyEntry):
keystore += self._write_private_key(alias, item, store_password)
elif isinstance(item, SecretKeyEntry):
if self.store_type != 'jceks':
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
raise NotImplementedError("Saving of Secret Keys not implemented")
else:
raise UnsupportedKeystoreEntryTypeException("Unknown entry type in keystore")
hash_fn = hashlib.sha1
store_password_utf16 = store_password.encode('utf-16be')
hash = hash_fn(store_password_utf16 + SIGNATURE_WHITENING + keystore).digest()
keystore += hash
return keystore | [
"def",
"saves",
"(",
"self",
",",
"store_password",
")",
":",
"if",
"self",
".",
"store_type",
"==",
"'jks'",
":",
"keystore",
"=",
"MAGIC_NUMBER_JKS",
"elif",
"self",
".",
"store_type",
"==",
"'jceks'",
":",
"raise",
"NotImplementedError",
"(",
"\"Saving of J... | Saves the keystore so that it can be read by other applications.
If any of the private keys are unencrypted, they will be encrypted
with the same password as the keystore.
:param str store_password: Password for the created keystore
(and for any unencrypted keys)
:returns: A byte string representation of the keystore.
:raises UnsupportedKeystoreTypeException: If the keystore
is of an unsupported type
:raises UnsupportedKeystoreEntryTypeException: If the keystore
contains an unsupported entry type | [
"Saves",
"the",
"keystore",
"so",
"that",
"it",
"can",
"be",
"read",
"by",
"other",
"applications",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L577-L622 | train | 213,003 |
kurtbrose/pyjks | jks/jks.py | KeyStore._java_is_subclass | def _java_is_subclass(cls, obj, class_name):
"""Given a deserialized JavaObject as returned by the javaobj library,
determine whether it's a subclass of the given class name.
"""
clazz = obj.get_class()
while clazz:
if clazz.name == class_name:
return True
clazz = clazz.superclass
return False | python | def _java_is_subclass(cls, obj, class_name):
"""Given a deserialized JavaObject as returned by the javaobj library,
determine whether it's a subclass of the given class name.
"""
clazz = obj.get_class()
while clazz:
if clazz.name == class_name:
return True
clazz = clazz.superclass
return False | [
"def",
"_java_is_subclass",
"(",
"cls",
",",
"obj",
",",
"class_name",
")",
":",
"clazz",
"=",
"obj",
".",
"get_class",
"(",
")",
"while",
"clazz",
":",
"if",
"clazz",
".",
"name",
"==",
"class_name",
":",
"return",
"True",
"clazz",
"=",
"clazz",
".",
... | Given a deserialized JavaObject as returned by the javaobj library,
determine whether it's a subclass of the given class name. | [
"Given",
"a",
"deserialized",
"JavaObject",
"as",
"returned",
"by",
"the",
"javaobj",
"library",
"determine",
"whether",
"it",
"s",
"a",
"subclass",
"of",
"the",
"given",
"class",
"name",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L730-L739 | train | 213,004 |
kurtbrose/pyjks | jks/rfc7292.py | _adjust | def _adjust(a, a_offset, b):
"""
a = bytearray
a_offset = int
b = bytearray
"""
x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1
a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value
x >>= 8
for i in range(len(b)-2, -1, -1):
x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF)
a[a_offset + i] = ctypes.c_ubyte(x).value
x >>= 8 | python | def _adjust(a, a_offset, b):
"""
a = bytearray
a_offset = int
b = bytearray
"""
x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1
a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value
x >>= 8
for i in range(len(b)-2, -1, -1):
x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF)
a[a_offset + i] = ctypes.c_ubyte(x).value
x >>= 8 | [
"def",
"_adjust",
"(",
"a",
",",
"a_offset",
",",
"b",
")",
":",
"x",
"=",
"(",
"b",
"[",
"-",
"1",
"]",
"&",
"0xFF",
")",
"+",
"(",
"a",
"[",
"a_offset",
"+",
"len",
"(",
"b",
")",
"-",
"1",
"]",
"&",
"0xFF",
")",
"+",
"1",
"a",
"[",
... | a = bytearray
a_offset = int
b = bytearray | [
"a",
"=",
"bytearray",
"a_offset",
"=",
"int",
"b",
"=",
"bytearray"
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/rfc7292.py#L69-L82 | train | 213,005 |
kurtbrose/pyjks | jks/sun_crypto.py | jks_pkey_encrypt | def jks_pkey_encrypt(key, password_str):
"""
Encrypts the private key with password protection algorithm used by JKS keystores.
"""
password_bytes = password_str.encode('utf-16be') # Java chars are UTF-16BE code units
iv = os.urandom(20)
key = bytearray(key)
xoring = zip(key, _jks_keystream(iv, password_bytes))
data = bytearray([d^k for d,k in xoring])
check = hashlib.sha1(bytes(password_bytes + key)).digest()
return bytes(iv + data + check) | python | def jks_pkey_encrypt(key, password_str):
"""
Encrypts the private key with password protection algorithm used by JKS keystores.
"""
password_bytes = password_str.encode('utf-16be') # Java chars are UTF-16BE code units
iv = os.urandom(20)
key = bytearray(key)
xoring = zip(key, _jks_keystream(iv, password_bytes))
data = bytearray([d^k for d,k in xoring])
check = hashlib.sha1(bytes(password_bytes + key)).digest()
return bytes(iv + data + check) | [
"def",
"jks_pkey_encrypt",
"(",
"key",
",",
"password_str",
")",
":",
"password_bytes",
"=",
"password_str",
".",
"encode",
"(",
"'utf-16be'",
")",
"# Java chars are UTF-16BE code units",
"iv",
"=",
"os",
".",
"urandom",
"(",
"20",
")",
"key",
"=",
"bytearray",
... | Encrypts the private key with password protection algorithm used by JKS keystores. | [
"Encrypts",
"the",
"private",
"key",
"with",
"password",
"protection",
"algorithm",
"used",
"by",
"JKS",
"keystores",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/sun_crypto.py#L9-L21 | train | 213,006 |
kurtbrose/pyjks | jks/sun_crypto.py | _jks_keystream | def _jks_keystream(iv, password):
"""Helper keystream generator for _jks_pkey_decrypt"""
cur = iv
while 1:
xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument
cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3
for byte in cur:
yield byte | python | def _jks_keystream(iv, password):
"""Helper keystream generator for _jks_pkey_decrypt"""
cur = iv
while 1:
xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument
cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3
for byte in cur:
yield byte | [
"def",
"_jks_keystream",
"(",
"iv",
",",
"password",
")",
":",
"cur",
"=",
"iv",
"while",
"1",
":",
"xhash",
"=",
"hashlib",
".",
"sha1",
"(",
"bytes",
"(",
"password",
"+",
"cur",
")",
")",
"# hashlib.sha1 in python 2.6 does not accept a bytearray argument",
... | Helper keystream generator for _jks_pkey_decrypt | [
"Helper",
"keystream",
"generator",
"for",
"_jks_pkey_decrypt"
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/sun_crypto.py#L42-L49 | train | 213,007 |
kurtbrose/pyjks | jks/util.py | bitstring_to_bytes | def bitstring_to_bytes(bitstr):
"""
Converts a pyasn1 univ.BitString instance to byte sequence of type 'bytes'.
The bit string is interpreted big-endian and is left-padded with 0 bits to form a multiple of 8.
"""
bitlist = list(bitstr)
bits_missing = (8 - len(bitlist) % 8) % 8
bitlist = [0]*bits_missing + bitlist # pad with 0 bits to a multiple of 8
result = bytearray()
for i in range(0, len(bitlist), 8):
byte = 0
for j in range(8):
byte = (byte << 1) | bitlist[i+j]
result.append(byte)
return bytes(result) | python | def bitstring_to_bytes(bitstr):
"""
Converts a pyasn1 univ.BitString instance to byte sequence of type 'bytes'.
The bit string is interpreted big-endian and is left-padded with 0 bits to form a multiple of 8.
"""
bitlist = list(bitstr)
bits_missing = (8 - len(bitlist) % 8) % 8
bitlist = [0]*bits_missing + bitlist # pad with 0 bits to a multiple of 8
result = bytearray()
for i in range(0, len(bitlist), 8):
byte = 0
for j in range(8):
byte = (byte << 1) | bitlist[i+j]
result.append(byte)
return bytes(result) | [
"def",
"bitstring_to_bytes",
"(",
"bitstr",
")",
":",
"bitlist",
"=",
"list",
"(",
"bitstr",
")",
"bits_missing",
"=",
"(",
"8",
"-",
"len",
"(",
"bitlist",
")",
"%",
"8",
")",
"%",
"8",
"bitlist",
"=",
"[",
"0",
"]",
"*",
"bits_missing",
"+",
"bit... | Converts a pyasn1 univ.BitString instance to byte sequence of type 'bytes'.
The bit string is interpreted big-endian and is left-padded with 0 bits to form a multiple of 8. | [
"Converts",
"a",
"pyasn1",
"univ",
".",
"BitString",
"instance",
"to",
"byte",
"sequence",
"of",
"type",
"bytes",
".",
"The",
"bit",
"string",
"is",
"interpreted",
"big",
"-",
"endian",
"and",
"is",
"left",
"-",
"padded",
"with",
"0",
"bits",
"to",
"form... | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/util.py#L187-L201 | train | 213,008 |
kurtbrose/pyjks | jks/bks.py | BksKeyStore._read_bks_key | def _read_bks_key(cls, data, pos, store_type):
"""Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry."""
key_type = b1.unpack_from(data, pos)[0]; pos += 1
key_format, pos = BksKeyStore._read_utf(data, pos, kind="key format")
key_algorithm, pos = BksKeyStore._read_utf(data, pos, kind="key algorithm")
key_enc, pos = BksKeyStore._read_data(data, pos)
entry = BksKeyEntry(key_type, key_format, key_algorithm, key_enc, store_type=store_type)
return entry, pos | python | def _read_bks_key(cls, data, pos, store_type):
"""Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry."""
key_type = b1.unpack_from(data, pos)[0]; pos += 1
key_format, pos = BksKeyStore._read_utf(data, pos, kind="key format")
key_algorithm, pos = BksKeyStore._read_utf(data, pos, kind="key algorithm")
key_enc, pos = BksKeyStore._read_data(data, pos)
entry = BksKeyEntry(key_type, key_format, key_algorithm, key_enc, store_type=store_type)
return entry, pos | [
"def",
"_read_bks_key",
"(",
"cls",
",",
"data",
",",
"pos",
",",
"store_type",
")",
":",
"key_type",
"=",
"b1",
".",
"unpack_from",
"(",
"data",
",",
"pos",
")",
"[",
"0",
"]",
"pos",
"+=",
"1",
"key_format",
",",
"pos",
"=",
"BksKeyStore",
".",
"... | Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry. | [
"Given",
"a",
"data",
"stream",
"attempt",
"to",
"parse",
"a",
"stored",
"BKS",
"key",
"entry",
"at",
"the",
"given",
"position",
"and",
"return",
"it",
"as",
"a",
"BksKeyEntry",
"."
] | 1cbe7f060e2ad076b6462f3273f11d635771ea3d | https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/bks.py#L355-L363 | train | 213,009 |
samdobson/image_slicer | image_slicer/main.py | calc_columns_rows | def calc_columns_rows(n):
"""
Calculate the number of columns and rows required to divide an image
into ``n`` parts.
Return a tuple of integers in the format (num_columns, num_rows)
"""
num_columns = int(ceil(sqrt(n)))
num_rows = int(ceil(n / float(num_columns)))
return (num_columns, num_rows) | python | def calc_columns_rows(n):
"""
Calculate the number of columns and rows required to divide an image
into ``n`` parts.
Return a tuple of integers in the format (num_columns, num_rows)
"""
num_columns = int(ceil(sqrt(n)))
num_rows = int(ceil(n / float(num_columns)))
return (num_columns, num_rows) | [
"def",
"calc_columns_rows",
"(",
"n",
")",
":",
"num_columns",
"=",
"int",
"(",
"ceil",
"(",
"sqrt",
"(",
"n",
")",
")",
")",
"num_rows",
"=",
"int",
"(",
"ceil",
"(",
"n",
"/",
"float",
"(",
"num_columns",
")",
")",
")",
"return",
"(",
"num_column... | Calculate the number of columns and rows required to divide an image
into ``n`` parts.
Return a tuple of integers in the format (num_columns, num_rows) | [
"Calculate",
"the",
"number",
"of",
"columns",
"and",
"rows",
"required",
"to",
"divide",
"an",
"image",
"into",
"n",
"parts",
"."
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/main.py#L58-L67 | train | 213,010 |
samdobson/image_slicer | image_slicer/main.py | get_combined_size | def get_combined_size(tiles):
"""Calculate combined size of tiles."""
# TODO: Refactor calculating layout to avoid repetition.
columns, rows = calc_columns_rows(len(tiles))
tile_size = tiles[0].image.size
return (tile_size[0] * columns, tile_size[1] * rows) | python | def get_combined_size(tiles):
"""Calculate combined size of tiles."""
# TODO: Refactor calculating layout to avoid repetition.
columns, rows = calc_columns_rows(len(tiles))
tile_size = tiles[0].image.size
return (tile_size[0] * columns, tile_size[1] * rows) | [
"def",
"get_combined_size",
"(",
"tiles",
")",
":",
"# TODO: Refactor calculating layout to avoid repetition.",
"columns",
",",
"rows",
"=",
"calc_columns_rows",
"(",
"len",
"(",
"tiles",
")",
")",
"tile_size",
"=",
"tiles",
"[",
"0",
"]",
".",
"image",
".",
"si... | Calculate combined size of tiles. | [
"Calculate",
"combined",
"size",
"of",
"tiles",
"."
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/main.py#L69-L74 | train | 213,011 |
samdobson/image_slicer | image_slicer/main.py | validate_image | def validate_image(image, number_tiles):
"""Basic sanity checks prior to performing a split."""
TILE_LIMIT = 99 * 99
try:
number_tiles = int(number_tiles)
except:
raise ValueError('number_tiles could not be cast to integer.')
if number_tiles > TILE_LIMIT or number_tiles < 2:
raise ValueError('Number of tiles must be between 2 and {} (you \
asked for {}).'.format(TILE_LIMIT, number_tiles)) | python | def validate_image(image, number_tiles):
"""Basic sanity checks prior to performing a split."""
TILE_LIMIT = 99 * 99
try:
number_tiles = int(number_tiles)
except:
raise ValueError('number_tiles could not be cast to integer.')
if number_tiles > TILE_LIMIT or number_tiles < 2:
raise ValueError('Number of tiles must be between 2 and {} (you \
asked for {}).'.format(TILE_LIMIT, number_tiles)) | [
"def",
"validate_image",
"(",
"image",
",",
"number_tiles",
")",
":",
"TILE_LIMIT",
"=",
"99",
"*",
"99",
"try",
":",
"number_tiles",
"=",
"int",
"(",
"number_tiles",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'number_tiles could not be cast to integer.'",
... | Basic sanity checks prior to performing a split. | [
"Basic",
"sanity",
"checks",
"prior",
"to",
"performing",
"a",
"split",
"."
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/main.py#L87-L98 | train | 213,012 |
samdobson/image_slicer | image_slicer/main.py | validate_image_col_row | def validate_image_col_row(image , col , row):
"""Basic checks for columns and rows values"""
SPLIT_LIMIT = 99
try:
col = int(col)
row = int(row)
except:
raise ValueError('columns and rows values could not be cast to integer.')
if col < 2:
raise ValueError('Number of columns must be between 2 and {} (you \
asked for {}).'.format(SPLIT_LIMIT, col))
if row < 2 :
raise ValueError('Number of rows must be between 2 and {} (you \
asked for {}).'.format(SPLIT_LIMIT, row)) | python | def validate_image_col_row(image , col , row):
"""Basic checks for columns and rows values"""
SPLIT_LIMIT = 99
try:
col = int(col)
row = int(row)
except:
raise ValueError('columns and rows values could not be cast to integer.')
if col < 2:
raise ValueError('Number of columns must be between 2 and {} (you \
asked for {}).'.format(SPLIT_LIMIT, col))
if row < 2 :
raise ValueError('Number of rows must be between 2 and {} (you \
asked for {}).'.format(SPLIT_LIMIT, row)) | [
"def",
"validate_image_col_row",
"(",
"image",
",",
"col",
",",
"row",
")",
":",
"SPLIT_LIMIT",
"=",
"99",
"try",
":",
"col",
"=",
"int",
"(",
"col",
")",
"row",
"=",
"int",
"(",
"row",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'columns and rows... | Basic checks for columns and rows values | [
"Basic",
"checks",
"for",
"columns",
"and",
"rows",
"values"
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/main.py#L100-L115 | train | 213,013 |
samdobson/image_slicer | image_slicer/main.py | slice | def slice(filename, number_tiles=None, col=None, row=None, save=True):
"""
Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
"""
im = Image.open(filename)
im_w, im_h = im.size
columns = 0
rows = 0
if not number_tiles is None:
validate_image(im, number_tiles)
columns, rows = calc_columns_rows(number_tiles)
extras = (columns * rows) - number_tiles
else:
validate_image_col_row(im, col, row)
columns = col
rows = row
extras = (columns * rows) - number_tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.
for pos_x in range(0, im_w - columns, tile_w): # as above.
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if save:
save_tiles(tiles,
prefix=get_basename(filename),
directory=os.path.dirname(filename))
return tuple(tiles) | python | def slice(filename, number_tiles=None, col=None, row=None, save=True):
"""
Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
"""
im = Image.open(filename)
im_w, im_h = im.size
columns = 0
rows = 0
if not number_tiles is None:
validate_image(im, number_tiles)
columns, rows = calc_columns_rows(number_tiles)
extras = (columns * rows) - number_tiles
else:
validate_image_col_row(im, col, row)
columns = col
rows = row
extras = (columns * rows) - number_tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.
for pos_x in range(0, im_w - columns, tile_w): # as above.
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if save:
save_tiles(tiles,
prefix=get_basename(filename),
directory=os.path.dirname(filename))
return tuple(tiles) | [
"def",
"slice",
"(",
"filename",
",",
"number_tiles",
"=",
"None",
",",
"col",
"=",
"None",
",",
"row",
"=",
"None",
",",
"save",
"=",
"True",
")",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"filename",
")",
"im_w",
",",
"im_h",
"=",
"im",
".",
... | Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances. | [
"Split",
"an",
"image",
"into",
"a",
"specified",
"number",
"of",
"tiles",
"."
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/main.py#L120-L168 | train | 213,014 |
samdobson/image_slicer | image_slicer/main.py | Tile.generate_filename | def generate_filename(self, directory=os.getcwd(), prefix='tile',
format='png', path=True):
"""Construct and return a filename for this tile."""
filename = prefix + '_{col:02d}_{row:02d}.{ext}'.format(
col=self.column, row=self.row, ext=format.lower().replace('jpeg', 'jpg'))
if not path:
return filename
return os.path.join(directory, filename) | python | def generate_filename(self, directory=os.getcwd(), prefix='tile',
format='png', path=True):
"""Construct and return a filename for this tile."""
filename = prefix + '_{col:02d}_{row:02d}.{ext}'.format(
col=self.column, row=self.row, ext=format.lower().replace('jpeg', 'jpg'))
if not path:
return filename
return os.path.join(directory, filename) | [
"def",
"generate_filename",
"(",
"self",
",",
"directory",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"prefix",
"=",
"'tile'",
",",
"format",
"=",
"'png'",
",",
"path",
"=",
"True",
")",
":",
"filename",
"=",
"prefix",
"+",
"'_{col:02d}_{row:02d}.{ext}'",
... | Construct and return a filename for this tile. | [
"Construct",
"and",
"return",
"a",
"filename",
"for",
"this",
"tile",
"."
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/main.py#L35-L42 | train | 213,015 |
samdobson/image_slicer | image_slicer/helpers.py | get_columns_rows | def get_columns_rows(filenames):
"""Derive number of columns and rows from filenames."""
tiles = []
for filename in filenames:
row, column = os.path.splitext(filename)[0][-5:].split('_')
tiles.append((int(row), int(column)))
rows = [pos[0] for pos in tiles]; columns = [pos[1] for pos in tiles]
num_rows = max(rows); num_columns = max(columns)
return (num_columns, num_rows) | python | def get_columns_rows(filenames):
"""Derive number of columns and rows from filenames."""
tiles = []
for filename in filenames:
row, column = os.path.splitext(filename)[0][-5:].split('_')
tiles.append((int(row), int(column)))
rows = [pos[0] for pos in tiles]; columns = [pos[1] for pos in tiles]
num_rows = max(rows); num_columns = max(columns)
return (num_columns, num_rows) | [
"def",
"get_columns_rows",
"(",
"filenames",
")",
":",
"tiles",
"=",
"[",
"]",
"for",
"filename",
"in",
"filenames",
":",
"row",
",",
"column",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"[",
"-",
"5",
":",
"]",
... | Derive number of columns and rows from filenames. | [
"Derive",
"number",
"of",
"columns",
"and",
"rows",
"from",
"filenames",
"."
] | 54ec036f73862085156e0544fe30e61a509c06d2 | https://github.com/samdobson/image_slicer/blob/54ec036f73862085156e0544fe30e61a509c06d2/image_slicer/helpers.py#L15-L23 | train | 213,016 |
axt/angr-utils | angrutils/expr.py | get_signed_range | def get_signed_range(se, expr):
"""
Calculate the range of the expression with signed boundaries
"""
size = expr.size()
umin = umax = smin = smax = None
if not sat_zero(se, expr):
try:
umin = se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
return (umin, umax)
except:
pass
try:
smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
smax = -(1 << size) + se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
return (smin, smax)
except:
pass
return None
else:
try:
umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
smin = 0
try:
smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
except:
pass
return (smin, umax)
except:
pass
return None | python | def get_signed_range(se, expr):
"""
Calculate the range of the expression with signed boundaries
"""
size = expr.size()
umin = umax = smin = smax = None
if not sat_zero(se, expr):
try:
umin = se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
return (umin, umax)
except:
pass
try:
smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
smax = -(1 << size) + se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
return (smin, smax)
except:
pass
return None
else:
try:
umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0])
smin = 0
try:
smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1])
except:
pass
return (smin, umax)
except:
pass
return None | [
"def",
"get_signed_range",
"(",
"se",
",",
"expr",
")",
":",
"size",
"=",
"expr",
".",
"size",
"(",
")",
"umin",
"=",
"umax",
"=",
"smin",
"=",
"smax",
"=",
"None",
"if",
"not",
"sat_zero",
"(",
"se",
",",
"expr",
")",
":",
"try",
":",
"umin",
... | Calculate the range of the expression with signed boundaries | [
"Calculate",
"the",
"range",
"of",
"the",
"expression",
"with",
"signed",
"boundaries"
] | 7fd826a16b511727148f727150cea1d7ba6985f5 | https://github.com/axt/angr-utils/blob/7fd826a16b511727148f727150cea1d7ba6985f5/angrutils/expr.py#L4-L36 | train | 213,017 |
dshean/pygeotools | pygeotools/lib/iolib.py | fn_check_full | def fn_check_full(fn):
"""Check for file existence
Avoids race condition, but slower than os.path.exists.
Parameters
----------
fn : str
Input filename string.
Returns
-------
status
True if file exists, False otherwise.
"""
status = True
if not os.path.isfile(fn):
status = False
else:
try:
open(fn)
except IOError:
status = False
return status | python | def fn_check_full(fn):
"""Check for file existence
Avoids race condition, but slower than os.path.exists.
Parameters
----------
fn : str
Input filename string.
Returns
-------
status
True if file exists, False otherwise.
"""
status = True
if not os.path.isfile(fn):
status = False
else:
try:
open(fn)
except IOError:
status = False
return status | [
"def",
"fn_check_full",
"(",
"fn",
")",
":",
"status",
"=",
"True",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fn",
")",
":",
"status",
"=",
"False",
"else",
":",
"try",
":",
"open",
"(",
"fn",
")",
"except",
"IOError",
":",
"status",
"... | Check for file existence
Avoids race condition, but slower than os.path.exists.
Parameters
----------
fn : str
Input filename string.
Returns
-------
status
True if file exists, False otherwise. | [
"Check",
"for",
"file",
"existence"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L48-L71 | train | 213,018 |
dshean/pygeotools | pygeotools/lib/iolib.py | fn_getma | def fn_getma(fn, bnum=1, return_ds=False):
"""Get masked array from input filename
Parameters
----------
fn : str
Input filename string
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values
"""
#Add check for filename existence
ds = fn_getds(fn)
out = ds_getma(ds, bnum=bnum)
if return_ds:
out = (out, ds)
return out | python | def fn_getma(fn, bnum=1, return_ds=False):
"""Get masked array from input filename
Parameters
----------
fn : str
Input filename string
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values
"""
#Add check for filename existence
ds = fn_getds(fn)
out = ds_getma(ds, bnum=bnum)
if return_ds:
out = (out, ds)
return out | [
"def",
"fn_getma",
"(",
"fn",
",",
"bnum",
"=",
"1",
",",
"return_ds",
"=",
"False",
")",
":",
"#Add check for filename existence",
"ds",
"=",
"fn_getds",
"(",
"fn",
")",
"out",
"=",
"ds_getma",
"(",
"ds",
",",
"bnum",
"=",
"bnum",
")",
"if",
"return_d... | Get masked array from input filename
Parameters
----------
fn : str
Input filename string
bnum : int, optional
Band number
Returns
-------
np.ma.array
Masked array containing raster values | [
"Get",
"masked",
"array",
"from",
"input",
"filename"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L103-L123 | train | 213,019 |
dshean/pygeotools | pygeotools/lib/iolib.py | b_getma | def b_getma(b):
"""Get masked array from input GDAL Band
Parameters
----------
b : gdal.Band
Input GDAL Band
Returns
-------
np.ma.array
Masked array containing raster values
"""
b_ndv = get_ndv_b(b)
#bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)
#This is more appropriate for float, handles precision issues
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
return bma | python | def b_getma(b):
"""Get masked array from input GDAL Band
Parameters
----------
b : gdal.Band
Input GDAL Band
Returns
-------
np.ma.array
Masked array containing raster values
"""
b_ndv = get_ndv_b(b)
#bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)
#This is more appropriate for float, handles precision issues
bma = np.ma.masked_values(b.ReadAsArray(), b_ndv)
return bma | [
"def",
"b_getma",
"(",
"b",
")",
":",
"b_ndv",
"=",
"get_ndv_b",
"(",
"b",
")",
"#bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv)",
"#This is more appropriate for float, handles precision issues",
"bma",
"=",
"np",
".",
"ma",
".",
"masked_values",
"(",
"b",
".",
"Rea... | Get masked array from input GDAL Band
Parameters
----------
b : gdal.Band
Input GDAL Band
Returns
-------
np.ma.array
Masked array containing raster values | [
"Get",
"masked",
"array",
"from",
"input",
"GDAL",
"Band"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L145-L162 | train | 213,020 |
dshean/pygeotools | pygeotools/lib/iolib.py | get_sub_dim | def get_sub_dim(src_ds, scale=None, maxdim=1024):
"""Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor
"""
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
#Need to check to make sure scale is positive real
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale | python | def get_sub_dim(src_ds, scale=None, maxdim=1024):
"""Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor
"""
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
#Need to check to make sure scale is positive real
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale | [
"def",
"get_sub_dim",
"(",
"src_ds",
",",
"scale",
"=",
"None",
",",
"maxdim",
"=",
"1024",
")",
":",
"ns",
"=",
"src_ds",
".",
"RasterXSize",
"nl",
"=",
"src_ds",
".",
"RasterYSize",
"maxdim",
"=",
"float",
"(",
"maxdim",
")",
"if",
"scale",
"is",
"... | Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor | [
"Compute",
"dimensions",
"of",
"subsampled",
"dataset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L164-L196 | train | 213,021 |
dshean/pygeotools | pygeotools/lib/iolib.py | ds_getma_sub | def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False):
"""Load a subsampled array, rather than full resolution
This is useful when working with large rasters
Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
bnum : int, optional
Band number
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
np.ma.array
Masked array containing raster values
"""
#print src_ds.GetFileList()[0]
b = src_ds.GetRasterBand(bnum)
b_ndv = get_ndv_b(b)
ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)
#The buf_size parameters determine the final array dimensions
b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)
bma = np.ma.masked_values(b_array, b_ndv)
out = bma
if return_ds:
dtype = src_ds.GetRasterBand(1).DataType
src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)
gt = np.array(src_ds.GetGeoTransform())
gt[[1,5]] = gt[[1,5]]*scale
src_ds_sub.SetGeoTransform(list(gt))
src_ds_sub.SetProjection(src_ds.GetProjection())
b = src_ds_sub.GetRasterBand(1)
b.WriteArray(bma)
b.SetNoDataValue(b_ndv)
out = (bma, src_ds_sub)
return out | python | def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False):
"""Load a subsampled array, rather than full resolution
This is useful when working with large rasters
Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
bnum : int, optional
Band number
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
np.ma.array
Masked array containing raster values
"""
#print src_ds.GetFileList()[0]
b = src_ds.GetRasterBand(bnum)
b_ndv = get_ndv_b(b)
ns, nl, scale = get_sub_dim(src_ds, scale, maxdim)
#The buf_size parameters determine the final array dimensions
b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl)
bma = np.ma.masked_values(b_array, b_ndv)
out = bma
if return_ds:
dtype = src_ds.GetRasterBand(1).DataType
src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype)
gt = np.array(src_ds.GetGeoTransform())
gt[[1,5]] = gt[[1,5]]*scale
src_ds_sub.SetGeoTransform(list(gt))
src_ds_sub.SetProjection(src_ds.GetProjection())
b = src_ds_sub.GetRasterBand(1)
b.WriteArray(bma)
b.SetNoDataValue(b_ndv)
out = (bma, src_ds_sub)
return out | [
"def",
"ds_getma_sub",
"(",
"src_ds",
",",
"bnum",
"=",
"1",
",",
"scale",
"=",
"None",
",",
"maxdim",
"=",
"1024.",
",",
"return_ds",
"=",
"False",
")",
":",
"#print src_ds.GetFileList()[0]",
"b",
"=",
"src_ds",
".",
"GetRasterBand",
"(",
"bnum",
")",
"... | Load a subsampled array, rather than full resolution
This is useful when working with large rasters
Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method.
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
bnum : int, optional
Band number
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
np.ma.array
Masked array containing raster values | [
"Load",
"a",
"subsampled",
"array",
"rather",
"than",
"full",
"resolution"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L205-L247 | train | 213,022 |
dshean/pygeotools | pygeotools/lib/iolib.py | writeGTiff | def writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):
"""Write input array to disk as GeoTiff
Parameters
----------
a : np.array or np.ma.array
Input array
dst_fn : str
Output filename
src_ds: GDAL Dataset, optional
Source Dataset to use for creating copy
bnum : int, optional
Output band
ndv : float, optional
Output NoData Value
gt : list, optional
Output GeoTransform
proj : str, optional
Output Projection (OGC WKT or PROJ.4 format)
create : bool, optional
Create new dataset
sparse : bool, optional
Output should be created with sparse options
"""
#If input is not np.ma, this creates a new ma, which has default filL_value of 1E20
#Must manually override with ndv
#Also consumes a lot of memory
#Should bypass if input is bool
from pygeotools.lib.malib import checkma
a = checkma(a, fix=False)
#Want to preserve fill_value if already specified
if ndv is not None:
a.set_fill_value(ndv)
driver = gtif_drv
#Currently only support writing singleband rasters
#if a.ndim > 2:
# np_nbands = a.shape[2]
# if src_ds.RasterCount np_nbands:
# for bnum in np_nbands:
nbands = 1
np_dt = a.dtype.name
if src_ds is not None:
#If this is a fn, get a ds
#Note: this saves a lot of unnecessary iolib.fn_getds calls
if isinstance(src_ds, str):
src_ds = fn_getds(src_ds)
#if isinstance(src_ds, gdal.Dataset):
src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)
src_gt = src_ds.GetGeoTransform()
#This is WKT
src_proj = src_ds.GetProjection()
#src_srs = osr.SpatialReference()
#src_srs.ImportFromWkt(src_ds.GetProjectionRef())
#Probably a cleaner way to handle this
if gt is None:
gt = src_gt
if proj is None:
proj = src_proj
#Need to create a new copy of the default options
opt = list(gdal_opt)
#Note: packbits is better for sparse data
if sparse:
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=PACKBITS')
#Not sure if VW can handle sparse tif
#opt.append('SPARSE_OK=TRUE')
#Use predictor=3 for floating point data
if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt:
opt.append('PREDICTOR=3')
#If input ma is same as src_ds, write out array using CreateCopy from existing dataset
#if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())):
#Should compare srs.IsSame(src_srs)
if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):
#Note: third option is strict flag, set to false
dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)
#Otherwise, use Create
else:
a_dtype = a.dtype
gdal_dtype = np2gdal_dtype(a_dtype)
if a_dtype.name == 'bool':
#Set ndv to 0
a.fill_value = False
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=DEFLATE')
#opt.append('NBITS=1')
#Create(fn, nx, ny, nbands, dtype, opt)
dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)
#Note: Need GeoMA here to make this work, or accept gt as argument
#Could also do ds creation in calling script
if gt is not None:
dst_ds.SetGeoTransform(gt)
if proj is not None:
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(bnum).WriteArray(a.filled())
dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))
dst_ds = None | python | def writeGTiff(a, dst_fn, src_ds=None, bnum=1, ndv=None, gt=None, proj=None, create=False, sparse=False):
"""Write input array to disk as GeoTiff
Parameters
----------
a : np.array or np.ma.array
Input array
dst_fn : str
Output filename
src_ds: GDAL Dataset, optional
Source Dataset to use for creating copy
bnum : int, optional
Output band
ndv : float, optional
Output NoData Value
gt : list, optional
Output GeoTransform
proj : str, optional
Output Projection (OGC WKT or PROJ.4 format)
create : bool, optional
Create new dataset
sparse : bool, optional
Output should be created with sparse options
"""
#If input is not np.ma, this creates a new ma, which has default filL_value of 1E20
#Must manually override with ndv
#Also consumes a lot of memory
#Should bypass if input is bool
from pygeotools.lib.malib import checkma
a = checkma(a, fix=False)
#Want to preserve fill_value if already specified
if ndv is not None:
a.set_fill_value(ndv)
driver = gtif_drv
#Currently only support writing singleband rasters
#if a.ndim > 2:
# np_nbands = a.shape[2]
# if src_ds.RasterCount np_nbands:
# for bnum in np_nbands:
nbands = 1
np_dt = a.dtype.name
if src_ds is not None:
#If this is a fn, get a ds
#Note: this saves a lot of unnecessary iolib.fn_getds calls
if isinstance(src_ds, str):
src_ds = fn_getds(src_ds)
#if isinstance(src_ds, gdal.Dataset):
src_dt = gdal.GetDataTypeName(src_ds.GetRasterBand(bnum).DataType)
src_gt = src_ds.GetGeoTransform()
#This is WKT
src_proj = src_ds.GetProjection()
#src_srs = osr.SpatialReference()
#src_srs.ImportFromWkt(src_ds.GetProjectionRef())
#Probably a cleaner way to handle this
if gt is None:
gt = src_gt
if proj is None:
proj = src_proj
#Need to create a new copy of the default options
opt = list(gdal_opt)
#Note: packbits is better for sparse data
if sparse:
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=PACKBITS')
#Not sure if VW can handle sparse tif
#opt.append('SPARSE_OK=TRUE')
#Use predictor=3 for floating point data
if 'float' in np_dt.lower() and 'COMPRESS=LZW' in opt:
opt.append('PREDICTOR=3')
#If input ma is same as src_ds, write out array using CreateCopy from existing dataset
#if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())):
#Should compare srs.IsSame(src_srs)
if not create and (src_ds is not None) and ((a.shape[0] == src_ds.RasterYSize) and (a.shape[1] == src_ds.RasterXSize) and (np_dt.lower() == src_dt.lower())) and (src_gt == gt) and (src_proj == proj):
#Note: third option is strict flag, set to false
dst_ds = driver.CreateCopy(dst_fn, src_ds, 0, options=opt)
#Otherwise, use Create
else:
a_dtype = a.dtype
gdal_dtype = np2gdal_dtype(a_dtype)
if a_dtype.name == 'bool':
#Set ndv to 0
a.fill_value = False
opt.remove('COMPRESS=LZW')
opt.append('COMPRESS=DEFLATE')
#opt.append('NBITS=1')
#Create(fn, nx, ny, nbands, dtype, opt)
dst_ds = driver.Create(dst_fn, a.shape[1], a.shape[0], nbands, gdal_dtype, options=opt)
#Note: Need GeoMA here to make this work, or accept gt as argument
#Could also do ds creation in calling script
if gt is not None:
dst_ds.SetGeoTransform(gt)
if proj is not None:
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(bnum).WriteArray(a.filled())
dst_ds.GetRasterBand(bnum).SetNoDataValue(float(a.fill_value))
dst_ds = None | [
"def",
"writeGTiff",
"(",
"a",
",",
"dst_fn",
",",
"src_ds",
"=",
"None",
",",
"bnum",
"=",
"1",
",",
"ndv",
"=",
"None",
",",
"gt",
"=",
"None",
",",
"proj",
"=",
"None",
",",
"create",
"=",
"False",
",",
"sparse",
"=",
"False",
")",
":",
"#If... | Write input array to disk as GeoTiff
Parameters
----------
a : np.array or np.ma.array
Input array
dst_fn : str
Output filename
src_ds: GDAL Dataset, optional
Source Dataset to use for creating copy
bnum : int, optional
Output band
ndv : float, optional
Output NoData Value
gt : list, optional
Output GeoTransform
proj : str, optional
Output Projection (OGC WKT or PROJ.4 format)
create : bool, optional
Create new dataset
sparse : bool, optional
Output should be created with sparse options | [
"Write",
"input",
"array",
"to",
"disk",
"as",
"GeoTiff"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L252-L353 | train | 213,023 |
dshean/pygeotools | pygeotools/lib/iolib.py | writevrt | def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
"""
Write out a vrt to accompany a csv of points
"""
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close() | python | def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):
"""
Write out a vrt to accompany a csv of points
"""
out_vrt = os.path.splitext(out_csv)[0]+'.vrt'
out_csv = os.path.split(out_csv)[-1]
f = open(out_vrt, 'w')
f.write('<OGRVRTDataSource>\n')
f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0])
f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv)
f.write(' <GeometryType>wkbPoint</GeometryType>\n')
f.write(' <LayerSRS>%s</LayerSRS>\n' % srs)
f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y))
f.write(' </OGRVRTLayer>\n')
f.write('</OGRVRTDataSource>\n')
f.close() | [
"def",
"writevrt",
"(",
"out_csv",
",",
"srs",
"=",
"'EPSG:4326'",
",",
"x",
"=",
"'field_1'",
",",
"y",
"=",
"'field_2'",
")",
":",
"out_vrt",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"out_csv",
")",
"[",
"0",
"]",
"+",
"'.vrt'",
"out_csv",
"... | Write out a vrt to accompany a csv of points | [
"Write",
"out",
"a",
"vrt",
"to",
"accompany",
"a",
"csv",
"of",
"points"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L355-L370 | train | 213,024 |
dshean/pygeotools | pygeotools/lib/iolib.py | np2gdal_dtype | def np2gdal_dtype(d):
"""
Get GDAL RasterBand datatype that corresponds with NumPy datatype
Input should be numpy array or numpy dtype
"""
dt_dict = gdal_array.codes
if isinstance(d, (np.ndarray, np.generic)):
d = d.dtype
#This creates dtype from another built-in type
#d = np.dtype(d)
if isinstance(d, np.dtype):
if d.name == 'int8':
gdal_dt = 1
elif d.name == 'bool':
#Write out as Byte
gdal_dt = 1
else:
gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
else:
print("Input must be NumPy array or NumPy dtype")
gdal_dt = None
return gdal_dt | python | def np2gdal_dtype(d):
"""
Get GDAL RasterBand datatype that corresponds with NumPy datatype
Input should be numpy array or numpy dtype
"""
dt_dict = gdal_array.codes
if isinstance(d, (np.ndarray, np.generic)):
d = d.dtype
#This creates dtype from another built-in type
#d = np.dtype(d)
if isinstance(d, np.dtype):
if d.name == 'int8':
gdal_dt = 1
elif d.name == 'bool':
#Write out as Byte
gdal_dt = 1
else:
gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
else:
print("Input must be NumPy array or NumPy dtype")
gdal_dt = None
return gdal_dt | [
"def",
"np2gdal_dtype",
"(",
"d",
")",
":",
"dt_dict",
"=",
"gdal_array",
".",
"codes",
"if",
"isinstance",
"(",
"d",
",",
"(",
"np",
".",
"ndarray",
",",
"np",
".",
"generic",
")",
")",
":",
"d",
"=",
"d",
".",
"dtype",
"#This creates dtype from anoth... | Get GDAL RasterBand datatype that corresponds with NumPy datatype
Input should be numpy array or numpy dtype | [
"Get",
"GDAL",
"RasterBand",
"datatype",
"that",
"corresponds",
"with",
"NumPy",
"datatype",
"Input",
"should",
"be",
"numpy",
"array",
"or",
"numpy",
"dtype"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L374-L395 | train | 213,025 |
dshean/pygeotools | pygeotools/lib/iolib.py | gdal2np_dtype | def gdal2np_dtype(b):
"""
Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
"""
dt_dict = gdal_array.codes
if isinstance(b, str):
b = gdal.Open(b)
if isinstance(b, gdal.Dataset):
b = b.GetRasterBand(1)
if isinstance(b, gdal.Band):
b = b.DataType
if isinstance(b, int):
np_dtype = dt_dict[b]
else:
np_dtype = None
print("Input must be GDAL Dataset or RasterBand object")
return np_dtype | python | def gdal2np_dtype(b):
"""
Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
"""
dt_dict = gdal_array.codes
if isinstance(b, str):
b = gdal.Open(b)
if isinstance(b, gdal.Dataset):
b = b.GetRasterBand(1)
if isinstance(b, gdal.Band):
b = b.DataType
if isinstance(b, int):
np_dtype = dt_dict[b]
else:
np_dtype = None
print("Input must be GDAL Dataset or RasterBand object")
return np_dtype | [
"def",
"gdal2np_dtype",
"(",
"b",
")",
":",
"dt_dict",
"=",
"gdal_array",
".",
"codes",
"if",
"isinstance",
"(",
"b",
",",
"str",
")",
":",
"b",
"=",
"gdal",
".",
"Open",
"(",
"b",
")",
"if",
"isinstance",
"(",
"b",
",",
"gdal",
".",
"Dataset",
"... | Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype | [
"Get",
"NumPy",
"datatype",
"that",
"corresponds",
"with",
"GDAL",
"RasterBand",
"datatype",
"Input",
"can",
"be",
"filename",
"GDAL",
"Dataset",
"GDAL",
"RasterBand",
"or",
"GDAL",
"integer",
"dtype"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L397-L414 | train | 213,026 |
dshean/pygeotools | pygeotools/lib/iolib.py | get_ndv_b | def get_ndv_b(b):
"""Get NoData value for GDAL band.
If NoDataValue is not set in the band,
extract upper left and lower right pixel values.
Otherwise assume NoDataValue is 0.
Parameters
----------
b : GDALRasterBand object
This is the input band.
Returns
-------
b_ndv : float
NoData value
"""
b_ndv = b.GetNoDataValue()
if b_ndv is None:
#Check ul pixel for ndv
ns = b.XSize
nl = b.YSize
ul = float(b.ReadAsArray(0, 0, 1, 1))
#ur = float(b.ReadAsArray(ns-1, 0, 1, 1))
lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
#ll = float(b.ReadAsArray(0, nl-1, 1, 1))
#Probably better to use 3/4 corner criterion
#if ul == ur == lr == ll:
if np.isnan(ul) or ul == lr:
b_ndv = ul
else:
#Assume ndv is 0
b_ndv = 0
elif np.isnan(b_ndv):
b_dt = gdal.GetDataTypeName(b.DataType)
if 'Float' in b_dt:
b_ndv = np.nan
else:
b_ndv = 0
return b_ndv | python | def get_ndv_b(b):
"""Get NoData value for GDAL band.
If NoDataValue is not set in the band,
extract upper left and lower right pixel values.
Otherwise assume NoDataValue is 0.
Parameters
----------
b : GDALRasterBand object
This is the input band.
Returns
-------
b_ndv : float
NoData value
"""
b_ndv = b.GetNoDataValue()
if b_ndv is None:
#Check ul pixel for ndv
ns = b.XSize
nl = b.YSize
ul = float(b.ReadAsArray(0, 0, 1, 1))
#ur = float(b.ReadAsArray(ns-1, 0, 1, 1))
lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
#ll = float(b.ReadAsArray(0, nl-1, 1, 1))
#Probably better to use 3/4 corner criterion
#if ul == ur == lr == ll:
if np.isnan(ul) or ul == lr:
b_ndv = ul
else:
#Assume ndv is 0
b_ndv = 0
elif np.isnan(b_ndv):
b_dt = gdal.GetDataTypeName(b.DataType)
if 'Float' in b_dt:
b_ndv = np.nan
else:
b_ndv = 0
return b_ndv | [
"def",
"get_ndv_b",
"(",
"b",
")",
":",
"b_ndv",
"=",
"b",
".",
"GetNoDataValue",
"(",
")",
"if",
"b_ndv",
"is",
"None",
":",
"#Check ul pixel for ndv",
"ns",
"=",
"b",
".",
"XSize",
"nl",
"=",
"b",
".",
"YSize",
"ul",
"=",
"float",
"(",
"b",
".",
... | Get NoData value for GDAL band.
If NoDataValue is not set in the band,
extract upper left and lower right pixel values.
Otherwise assume NoDataValue is 0.
Parameters
----------
b : GDALRasterBand object
This is the input band.
Returns
-------
b_ndv : float
NoData value | [
"Get",
"NoData",
"value",
"for",
"GDAL",
"band",
"."
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L444-L484 | train | 213,027 |
dshean/pygeotools | pygeotools/lib/iolib.py | cpu_count | def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu | python | def cpu_count(logical=True):
"""Return system CPU count
"""
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu | [
"def",
"cpu_count",
"(",
"logical",
"=",
"True",
")",
":",
"if",
"logical",
":",
"from",
"multiprocessing",
"import",
"cpu_count",
"ncpu",
"=",
"cpu_count",
"(",
")",
"else",
":",
"import",
"psutil",
"ncpu",
"=",
"psutil",
".",
"cpu_count",
"(",
"logical",... | Return system CPU count | [
"Return",
"system",
"CPU",
"count"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L506-L515 | train | 213,028 |
dshean/pygeotools | pygeotools/lib/iolib.py | getfile | def getfile(url, outdir=None):
"""Function to fetch files using urllib
Works with ftp
"""
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn | python | def getfile(url, outdir=None):
"""Function to fetch files using urllib
Works with ftp
"""
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn | [
"def",
"getfile",
"(",
"url",
",",
"outdir",
"=",
"None",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"split",
"(",
"url",
")",
"[",
"-",
"1",
"]",
"if",
"outdir",
"is",
"not",
"None",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"... | Function to fetch files using urllib
Works with ftp | [
"Function",
"to",
"fetch",
"files",
"using",
"urllib"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L544-L562 | train | 213,029 |
dshean/pygeotools | pygeotools/lib/iolib.py | getfile2 | def getfile2(url, auth=None, outdir=None):
"""Function to fetch files using requests
Works with https authentication
"""
import requests
print("Retrieving: %s" % url)
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if auth is not None:
r = requests.get(url, stream=True, auth=auth)
else:
r = requests.get(url, stream=True)
chunk_size = 1000000
with open(fn, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk) | python | def getfile2(url, auth=None, outdir=None):
"""Function to fetch files using requests
Works with https authentication
"""
import requests
print("Retrieving: %s" % url)
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if auth is not None:
r = requests.get(url, stream=True, auth=auth)
else:
r = requests.get(url, stream=True)
chunk_size = 1000000
with open(fn, 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk) | [
"def",
"getfile2",
"(",
"url",
",",
"auth",
"=",
"None",
",",
"outdir",
"=",
"None",
")",
":",
"import",
"requests",
"print",
"(",
"\"Retrieving: %s\"",
"%",
"url",
")",
"fn",
"=",
"os",
".",
"path",
".",
"split",
"(",
"url",
")",
"[",
"-",
"1",
... | Function to fetch files using requests
Works with https authentication | [
"Function",
"to",
"fetch",
"files",
"using",
"requests"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L566-L584 | train | 213,030 |
dshean/pygeotools | pygeotools/lib/iolib.py | get_auth | def get_auth():
"""Get authorization token for https
"""
import getpass
from requests.auth import HTTPDigestAuth
#This binds raw_input to input for Python 2
input_func = input
try:
input_func = raw_input
except NameError:
pass
uname = input_func("MODSCAG Username:")
pw = getpass.getpass("MODSCAG Password:")
auth = HTTPDigestAuth(uname, pw)
#wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
return auth | python | def get_auth():
"""Get authorization token for https
"""
import getpass
from requests.auth import HTTPDigestAuth
#This binds raw_input to input for Python 2
input_func = input
try:
input_func = raw_input
except NameError:
pass
uname = input_func("MODSCAG Username:")
pw = getpass.getpass("MODSCAG Password:")
auth = HTTPDigestAuth(uname, pw)
#wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw
return auth | [
"def",
"get_auth",
"(",
")",
":",
"import",
"getpass",
"from",
"requests",
".",
"auth",
"import",
"HTTPDigestAuth",
"#This binds raw_input to input for Python 2",
"input_func",
"=",
"input",
"try",
":",
"input_func",
"=",
"raw_input",
"except",
"NameError",
":",
"pa... | Get authorization token for https | [
"Get",
"authorization",
"token",
"for",
"https"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L587-L602 | train | 213,031 |
dshean/pygeotools | pygeotools/lib/iolib.py | readcsv | def readcsv(fn):
"""
Wrapper to read arbitrary csv, check for header
Needs some work to be more robust, quickly added for demcoreg sampling
"""
import csv
#Check first line for header
with open(fn, 'r') as f:
reader = csv.DictReader(f)
hdr = reader.fieldnames
#Assume there is a header on first line, check
skiprows = 1
if np.all(f.isdigit() for f in hdr):
hdr = None
skiprows = 0
#Check header for lat/lon/z or x/y/z tags
#Should probably do genfromtxt here if header exists and dtype of cols is variable
pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
return pts | python | def readcsv(fn):
"""
Wrapper to read arbitrary csv, check for header
Needs some work to be more robust, quickly added for demcoreg sampling
"""
import csv
#Check first line for header
with open(fn, 'r') as f:
reader = csv.DictReader(f)
hdr = reader.fieldnames
#Assume there is a header on first line, check
skiprows = 1
if np.all(f.isdigit() for f in hdr):
hdr = None
skiprows = 0
#Check header for lat/lon/z or x/y/z tags
#Should probably do genfromtxt here if header exists and dtype of cols is variable
pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
return pts | [
"def",
"readcsv",
"(",
"fn",
")",
":",
"import",
"csv",
"#Check first line for header",
"with",
"open",
"(",
"fn",
",",
"'r'",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"f",
")",
"hdr",
"=",
"reader",
".",
"fieldnames",
"#Assum... | Wrapper to read arbitrary csv, check for header
Needs some work to be more robust, quickly added for demcoreg sampling | [
"Wrapper",
"to",
"read",
"arbitrary",
"csv",
"check",
"for",
"header"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/iolib.py#L604-L626 | train | 213,032 |
dshean/pygeotools | pygeotools/lib/filtlib.py | absrange_fltr | def absrange_fltr(dem, rangelim):
"""Absolute range filter
"""
out = range_fltr(np.ma.abs(dem), *rangelim)
#Apply mask to original input
out = np.ma.array(dem, mask=np.ma.getmaskarray(out))
out.set_fill_value(dem.fill_value)
return out | python | def absrange_fltr(dem, rangelim):
"""Absolute range filter
"""
out = range_fltr(np.ma.abs(dem), *rangelim)
#Apply mask to original input
out = np.ma.array(dem, mask=np.ma.getmaskarray(out))
out.set_fill_value(dem.fill_value)
return out | [
"def",
"absrange_fltr",
"(",
"dem",
",",
"rangelim",
")",
":",
"out",
"=",
"range_fltr",
"(",
"np",
".",
"ma",
".",
"abs",
"(",
"dem",
")",
",",
"*",
"rangelim",
")",
"#Apply mask to original input",
"out",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"d... | Absolute range filter | [
"Absolute",
"range",
"filter"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L26-L33 | train | 213,033 |
dshean/pygeotools | pygeotools/lib/filtlib.py | gauss_fltr_astropy | def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):
"""Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma
"""
#import astropy.nddata
import astropy.convolution
dem = malib.checkma(dem)
#Generate 2D gaussian kernel for input sigma and size
#Default size is 8*sigma in x and y directions
#kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian')
#Size must be odd
if size is not None:
size = int(np.floor(size/2)*2 + 1)
size = max(size, 3)
#Truncate the filter at this many standard deviations. Default is 4.0
truncate = 3.0
if size is not None and sigma is None:
sigma = (size - 1) / (2*truncate)
elif size is None and sigma is not None:
#Round up to nearest odd int
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
elif size is None and sigma is None:
#Use default parameters
sigma = 1
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
size = max(size, 3)
kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')
print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % \
(size, sigma, kernel.array.sum()))
#This will fill holes
#np.nan is float
#dem_filt_gauss = astropy.nddata.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#Added normalization to ensure filtered values are not brightened/darkened if kernelsum != 1
dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)
#This will preserve original ndv pixels, applying original mask after filtering
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
if fill_interior:
mask = malib.maskfill(dem)
else:
mask = dem.mask
dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)
out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
out.set_fill_value(dem.fill_value.astype(dem.dtype))
return out.astype(dem.dtype) | python | def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):
"""Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma
"""
#import astropy.nddata
import astropy.convolution
dem = malib.checkma(dem)
#Generate 2D gaussian kernel for input sigma and size
#Default size is 8*sigma in x and y directions
#kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian')
#Size must be odd
if size is not None:
size = int(np.floor(size/2)*2 + 1)
size = max(size, 3)
#Truncate the filter at this many standard deviations. Default is 4.0
truncate = 3.0
if size is not None and sigma is None:
sigma = (size - 1) / (2*truncate)
elif size is None and sigma is not None:
#Round up to nearest odd int
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
elif size is None and sigma is None:
#Use default parameters
sigma = 1
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
size = max(size, 3)
kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')
print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % \
(size, sigma, kernel.array.sum()))
#This will fill holes
#np.nan is float
#dem_filt_gauss = astropy.nddata.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#Added normalization to ensure filtered values are not brightened/darkened if kernelsum != 1
dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)
#This will preserve original ndv pixels, applying original mask after filtering
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
if fill_interior:
mask = malib.maskfill(dem)
else:
mask = dem.mask
dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)
out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
out.set_fill_value(dem.fill_value.astype(dem.dtype))
return out.astype(dem.dtype) | [
"def",
"gauss_fltr_astropy",
"(",
"dem",
",",
"size",
"=",
"None",
",",
"sigma",
"=",
"None",
",",
"origmask",
"=",
"False",
",",
"fill_interior",
"=",
"False",
")",
":",
"#import astropy.nddata",
"import",
"astropy",
".",
"convolution",
"dem",
"=",
"malib",... | Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma | [
"Astropy",
"gaussian",
"filter",
"properly",
"handles",
"convolution",
"with",
"NaN"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L101-L170 | train | 213,034 |
dshean/pygeotools | pygeotools/lib/filtlib.py | gauss_fltr_pyramid | def gauss_fltr_pyramid(dem, size=None, full=False, origmask=False):
"""Pyaramidal downsampling approach for gaussian smoothing
Avoids the need for large kernels, very fast
Needs testing
"""
dem = malib.checkma(dem)
levels = int(np.floor(np.log2(size)))
#print levels
dim = np.floor(np.array(dem.shape)/float(2**levels) + 1)*(2**levels)
#print dem.shape
#print dim
#Can do something with np.pad here
#np.pad(a_fp.filled(), 1, mode='constant', constant_values=(a_fp.fill_value,))
dem2 = np.full(dim, dem.fill_value)
offset = (dim - np.array(dem.shape))/2.0
#print offset
#dem2[0:dem.shape[0],0:dem.shape[1]] = dem.data
dem2[offset[0]:dem.shape[0]+offset[0],offset[1]:dem.shape[1]+offset[1]] = dem.data
dem2 = np.ma.masked_equal(dem2, dem.fill_value)
#dem2 = dem
for n in range(levels):
print(dem2.shape)
dim = (np.floor(np.array(dem2.shape)/2.0 + 1)*2).astype(int)
#dem2 = gauss_fltr_astropy(dem2, size=5, origmask=origmask)
#dem2 = gauss_fltr_astropy(dem2, size=5)
dem2 = gauss_fltr_astropy(dem2, size=5)
#Note: Should use zoom with same bilinear interpolation here for consistency
#However, this doesn't respect nan
#dem2 = zoom(dem2, 0.5, order=1, prefilter=False, cval=dem.fill_value)
dem2 = dem2[::2,::2]
if full:
print("Resizing to original input dimensions")
from scipy.ndimage import zoom
for n in range(levels):
print(dem2.shape)
#Note: order 1 is bilinear
dem2 = zoom(dem2, 2, order=1, prefilter=False, cval=dem.fill_value)
#dem2 = zoom(dem2, 2**levels, order=1, prefilter=False, cval=dem2.fill_value)
print(dem2.shape)
#This was for power of 2 offset
#offset = (2**levels)/2
#print offset
#dem2 = dem2[offset:dem.shape[0]+offset,offset:dem.shape[1]+offset]
#Use original offset
dem2 = dem2[offset[0]:dem.shape[0]+offset[0],offset[1]:dem.shape[1]+offset[1]]
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
maskfill = malib.maskfill(dem)
#dem2 = np.ma.array(dem2, mask=np.ma.getmaskarray(dem))
dem2 = np.ma.array(dem2, mask=maskfill, fill_value=dem.fill_value)
return dem2 | python | def gauss_fltr_pyramid(dem, size=None, full=False, origmask=False):
"""Pyaramidal downsampling approach for gaussian smoothing
Avoids the need for large kernels, very fast
Needs testing
"""
dem = malib.checkma(dem)
levels = int(np.floor(np.log2(size)))
#print levels
dim = np.floor(np.array(dem.shape)/float(2**levels) + 1)*(2**levels)
#print dem.shape
#print dim
#Can do something with np.pad here
#np.pad(a_fp.filled(), 1, mode='constant', constant_values=(a_fp.fill_value,))
dem2 = np.full(dim, dem.fill_value)
offset = (dim - np.array(dem.shape))/2.0
#print offset
#dem2[0:dem.shape[0],0:dem.shape[1]] = dem.data
dem2[offset[0]:dem.shape[0]+offset[0],offset[1]:dem.shape[1]+offset[1]] = dem.data
dem2 = np.ma.masked_equal(dem2, dem.fill_value)
#dem2 = dem
for n in range(levels):
print(dem2.shape)
dim = (np.floor(np.array(dem2.shape)/2.0 + 1)*2).astype(int)
#dem2 = gauss_fltr_astropy(dem2, size=5, origmask=origmask)
#dem2 = gauss_fltr_astropy(dem2, size=5)
dem2 = gauss_fltr_astropy(dem2, size=5)
#Note: Should use zoom with same bilinear interpolation here for consistency
#However, this doesn't respect nan
#dem2 = zoom(dem2, 0.5, order=1, prefilter=False, cval=dem.fill_value)
dem2 = dem2[::2,::2]
if full:
print("Resizing to original input dimensions")
from scipy.ndimage import zoom
for n in range(levels):
print(dem2.shape)
#Note: order 1 is bilinear
dem2 = zoom(dem2, 2, order=1, prefilter=False, cval=dem.fill_value)
#dem2 = zoom(dem2, 2**levels, order=1, prefilter=False, cval=dem2.fill_value)
print(dem2.shape)
#This was for power of 2 offset
#offset = (2**levels)/2
#print offset
#dem2 = dem2[offset:dem.shape[0]+offset,offset:dem.shape[1]+offset]
#Use original offset
dem2 = dem2[offset[0]:dem.shape[0]+offset[0],offset[1]:dem.shape[1]+offset[1]]
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
maskfill = malib.maskfill(dem)
#dem2 = np.ma.array(dem2, mask=np.ma.getmaskarray(dem))
dem2 = np.ma.array(dem2, mask=maskfill, fill_value=dem.fill_value)
return dem2 | [
"def",
"gauss_fltr_pyramid",
"(",
"dem",
",",
"size",
"=",
"None",
",",
"full",
"=",
"False",
",",
"origmask",
"=",
"False",
")",
":",
"dem",
"=",
"malib",
".",
"checkma",
"(",
"dem",
")",
"levels",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"np",
... | Pyaramidal downsampling approach for gaussian smoothing
Avoids the need for large kernels, very fast
Needs testing | [
"Pyaramidal",
"downsampling",
"approach",
"for",
"gaussian",
"smoothing",
"Avoids",
"the",
"need",
"for",
"large",
"kernels",
"very",
"fast",
"Needs",
"testing"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L175-L226 | train | 213,035 |
dshean/pygeotools | pygeotools/lib/filtlib.py | gauss_fltr_opencv | def gauss_fltr_opencv(dem, size=3, sigma=1):
"""OpenCV Gaussian filter
Still propagates NaN values
"""
import cv2
dem = malib.checkma(dem)
dem_cv = cv2.GaussianBlur(dem.filled(np.nan), (size, size), sigma)
out = np.ma.fix_invalid(dem_cv)
out.set_fill_value(dem.fill_value)
return out | python | def gauss_fltr_opencv(dem, size=3, sigma=1):
"""OpenCV Gaussian filter
Still propagates NaN values
"""
import cv2
dem = malib.checkma(dem)
dem_cv = cv2.GaussianBlur(dem.filled(np.nan), (size, size), sigma)
out = np.ma.fix_invalid(dem_cv)
out.set_fill_value(dem.fill_value)
return out | [
"def",
"gauss_fltr_opencv",
"(",
"dem",
",",
"size",
"=",
"3",
",",
"sigma",
"=",
"1",
")",
":",
"import",
"cv2",
"dem",
"=",
"malib",
".",
"checkma",
"(",
"dem",
")",
"dem_cv",
"=",
"cv2",
".",
"GaussianBlur",
"(",
"dem",
".",
"filled",
"(",
"np",... | OpenCV Gaussian filter
Still propagates NaN values | [
"OpenCV",
"Gaussian",
"filter",
"Still",
"propagates",
"NaN",
"values"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L229-L238 | train | 213,036 |
dshean/pygeotools | pygeotools/lib/filtlib.py | gaussfill | def gaussfill(dem, size=3, newmask=None):
"""Gaussian filter with filling
"""
smooth = gauss_fltr_astropy(dem, size=size)
smooth[~dem.mask] = dem[~dem.mask]
if newmask is not None:
smooth = np.ma.array(smooth, mask=newmask)
return smooth | python | def gaussfill(dem, size=3, newmask=None):
"""Gaussian filter with filling
"""
smooth = gauss_fltr_astropy(dem, size=size)
smooth[~dem.mask] = dem[~dem.mask]
if newmask is not None:
smooth = np.ma.array(smooth, mask=newmask)
return smooth | [
"def",
"gaussfill",
"(",
"dem",
",",
"size",
"=",
"3",
",",
"newmask",
"=",
"None",
")",
":",
"smooth",
"=",
"gauss_fltr_astropy",
"(",
"dem",
",",
"size",
"=",
"size",
")",
"smooth",
"[",
"~",
"dem",
".",
"mask",
"]",
"=",
"dem",
"[",
"~",
"dem"... | Gaussian filter with filling | [
"Gaussian",
"filter",
"with",
"filling"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L240-L247 | train | 213,037 |
dshean/pygeotools | pygeotools/lib/filtlib.py | median_fltr | def median_fltr(dem, fsize=7, origmask=False):
"""Scipy.ndimage median filter
Does not properly handle NaN
"""
print("Applying median filter with size %s" % fsize)
from scipy.ndimage.filters import median_filter
dem_filt_med = median_filter(dem.filled(np.nan), fsize)
#Now mask all nans
out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value)
if origmask:
out = np.ma.array(out, mask=dem.mask, fill_value=dem.fill_value)
out.set_fill_value(dem.fill_value)
return out | python | def median_fltr(dem, fsize=7, origmask=False):
"""Scipy.ndimage median filter
Does not properly handle NaN
"""
print("Applying median filter with size %s" % fsize)
from scipy.ndimage.filters import median_filter
dem_filt_med = median_filter(dem.filled(np.nan), fsize)
#Now mask all nans
out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value)
if origmask:
out = np.ma.array(out, mask=dem.mask, fill_value=dem.fill_value)
out.set_fill_value(dem.fill_value)
return out | [
"def",
"median_fltr",
"(",
"dem",
",",
"fsize",
"=",
"7",
",",
"origmask",
"=",
"False",
")",
":",
"print",
"(",
"\"Applying median filter with size %s\"",
"%",
"fsize",
")",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"median_filter",
"dem_filt... | Scipy.ndimage median filter
Does not properly handle NaN | [
"Scipy",
".",
"ndimage",
"median",
"filter"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L271-L284 | train | 213,038 |
dshean/pygeotools | pygeotools/lib/filtlib.py | median_fltr_opencv | def median_fltr_opencv(dem, size=3, iterations=1):
"""OpenCV median filter
"""
import cv2
dem = malib.checkma(dem)
if size > 5:
print("Need to implement iteration")
n = 0
out = dem
while n <= iterations:
dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size)
out = np.ma.fix_invalid(dem_cv)
out.set_fill_value(dem.fill_value)
n += 1
return out | python | def median_fltr_opencv(dem, size=3, iterations=1):
"""OpenCV median filter
"""
import cv2
dem = malib.checkma(dem)
if size > 5:
print("Need to implement iteration")
n = 0
out = dem
while n <= iterations:
dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size)
out = np.ma.fix_invalid(dem_cv)
out.set_fill_value(dem.fill_value)
n += 1
return out | [
"def",
"median_fltr_opencv",
"(",
"dem",
",",
"size",
"=",
"3",
",",
"iterations",
"=",
"1",
")",
":",
"import",
"cv2",
"dem",
"=",
"malib",
".",
"checkma",
"(",
"dem",
")",
"if",
"size",
">",
"5",
":",
"print",
"(",
"\"Need to implement iteration\"",
... | OpenCV median filter | [
"OpenCV",
"median",
"filter"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L287-L301 | train | 213,039 |
dshean/pygeotools | pygeotools/lib/filtlib.py | circular_mask | def circular_mask(size):
"""Create a circular mask for an array
Useful when sampling rasters for a laser shot
"""
r = size/2
c = (r,r)
y,x = np.ogrid[-c[0]:size-c[0], -c[1]:size-c[1]]
mask = ~(x*x + y*y <= r*r)
return mask | python | def circular_mask(size):
"""Create a circular mask for an array
Useful when sampling rasters for a laser shot
"""
r = size/2
c = (r,r)
y,x = np.ogrid[-c[0]:size-c[0], -c[1]:size-c[1]]
mask = ~(x*x + y*y <= r*r)
return mask | [
"def",
"circular_mask",
"(",
"size",
")",
":",
"r",
"=",
"size",
"/",
"2",
"c",
"=",
"(",
"r",
",",
"r",
")",
"y",
",",
"x",
"=",
"np",
".",
"ogrid",
"[",
"-",
"c",
"[",
"0",
"]",
":",
"size",
"-",
"c",
"[",
"0",
"]",
",",
"-",
"c",
"... | Create a circular mask for an array
Useful when sampling rasters for a laser shot | [
"Create",
"a",
"circular",
"mask",
"for",
"an",
"array",
"Useful",
"when",
"sampling",
"rasters",
"for",
"a",
"laser",
"shot"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L303-L312 | train | 213,040 |
dshean/pygeotools | pygeotools/lib/filtlib.py | median_fltr_skimage | def median_fltr_skimage(dem, radius=3, erode=1, origmask=False):
"""
Older skimage.filter.median_filter
This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine
"""
#Note, ndimage doesn't properly handle ma - convert to nan
dem = malib.checkma(dem)
dem = dem.astype(np.float64)
#Mask islands
if erode > 0:
print("Eroding islands smaller than %s pixels" % (erode * 2))
dem = malib.mask_islands(dem, iterations=erode)
print("Applying median filter with radius %s" % radius)
#Note: this funcitonality was present in scikit-image 0.9.3
import skimage.filter
dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask)
#Starting in version 0.10.0, this is the new filter
#This is the new filter, but only supports uint8 or unit16
#import skimage.filters
#import skimage.morphology
#dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask)
#dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask)
#Now mask all nans
#skimage assigns the minimum value as nodata
#CHECK THIS, seems pretty hacky
#Also, looks like some valid values are masked at this stage, even though they should be above min
ndv = np.min(dem_filt_med)
#ndv = dem_filt_med.min() + 0.001
out = np.ma.masked_less_equal(dem_filt_med, ndv)
#Should probably replace the ndv with original ndv
out.set_fill_value(dem.fill_value)
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
#maskfill = malib.maskfill(dem, iterations=radius)
maskfill = malib.maskfill(dem)
#dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value)
out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value)
return out | python | def median_fltr_skimage(dem, radius=3, erode=1, origmask=False):
"""
Older skimage.filter.median_filter
This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine
"""
#Note, ndimage doesn't properly handle ma - convert to nan
dem = malib.checkma(dem)
dem = dem.astype(np.float64)
#Mask islands
if erode > 0:
print("Eroding islands smaller than %s pixels" % (erode * 2))
dem = malib.mask_islands(dem, iterations=erode)
print("Applying median filter with radius %s" % radius)
#Note: this funcitonality was present in scikit-image 0.9.3
import skimage.filter
dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask)
#Starting in version 0.10.0, this is the new filter
#This is the new filter, but only supports uint8 or unit16
#import skimage.filters
#import skimage.morphology
#dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask)
#dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask)
#Now mask all nans
#skimage assigns the minimum value as nodata
#CHECK THIS, seems pretty hacky
#Also, looks like some valid values are masked at this stage, even though they should be above min
ndv = np.min(dem_filt_med)
#ndv = dem_filt_med.min() + 0.001
out = np.ma.masked_less_equal(dem_filt_med, ndv)
#Should probably replace the ndv with original ndv
out.set_fill_value(dem.fill_value)
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
#maskfill = malib.maskfill(dem, iterations=radius)
maskfill = malib.maskfill(dem)
#dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value)
out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value)
return out | [
"def",
"median_fltr_skimage",
"(",
"dem",
",",
"radius",
"=",
"3",
",",
"erode",
"=",
"1",
",",
"origmask",
"=",
"False",
")",
":",
"#Note, ndimage doesn't properly handle ma - convert to nan",
"dem",
"=",
"malib",
".",
"checkma",
"(",
"dem",
")",
"dem",
"=",
... | Older skimage.filter.median_filter
This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine | [
"Older",
"skimage",
".",
"filter",
".",
"median_filter"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L341-L380 | train | 213,041 |
dshean/pygeotools | pygeotools/lib/filtlib.py | dz_fltr | def dz_fltr(dem_fn, refdem_fn, perc=None, rangelim=(0,30), smooth=False):
"""Absolute elevation difference range filter using values from a source raster file and a reference raster file
"""
try:
open(refdem_fn)
except IOError:
sys.exit('Unable to open reference DEM: %s' % refdem_fn)
from pygeotools.lib import warplib
dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
dem = iolib.ds_getma(dem_ds)
refdem = iolib.ds_getma(refdem_ds)
out = dz_fltr_ma(dem, refdem, perc, rangelim, smooth)
return out | python | def dz_fltr(dem_fn, refdem_fn, perc=None, rangelim=(0,30), smooth=False):
"""Absolute elevation difference range filter using values from a source raster file and a reference raster file
"""
try:
open(refdem_fn)
except IOError:
sys.exit('Unable to open reference DEM: %s' % refdem_fn)
from pygeotools.lib import warplib
dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
dem = iolib.ds_getma(dem_ds)
refdem = iolib.ds_getma(refdem_ds)
out = dz_fltr_ma(dem, refdem, perc, rangelim, smooth)
return out | [
"def",
"dz_fltr",
"(",
"dem_fn",
",",
"refdem_fn",
",",
"perc",
"=",
"None",
",",
"rangelim",
"=",
"(",
"0",
",",
"30",
")",
",",
"smooth",
"=",
"False",
")",
":",
"try",
":",
"open",
"(",
"refdem_fn",
")",
"except",
"IOError",
":",
"sys",
".",
"... | Absolute elevation difference range filter using values from a source raster file and a reference raster file | [
"Absolute",
"elevation",
"difference",
"range",
"filter",
"using",
"values",
"from",
"a",
"source",
"raster",
"file",
"and",
"a",
"reference",
"raster",
"file"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L396-L409 | train | 213,042 |
dshean/pygeotools | pygeotools/lib/filtlib.py | dz_fltr_ma | def dz_fltr_ma(dem, refdem, perc=None, rangelim=(0,30), smooth=False):
"""Absolute elevation difference range filter using values from a source array and a reference array
"""
if smooth:
refdem = gauss_fltr_astropy(refdem)
dem = gauss_fltr_astropy(dem)
dz = refdem - dem
#This is True for invalid values in DEM, and should be masked
demmask = np.ma.getmaskarray(dem)
if perc:
dz_perc = malib.calcperc(dz, perc)
print("Applying dz percentile filter (%s%%, %s%%): (%0.1f, %0.1f)" % (perc[0], perc[1], dz_perc[0], dz_perc[1]))
#This is True for invalid values
perc_mask = ((dz < dz_perc[0]) | (dz > dz_perc[1])).filled(False)
demmask = (demmask | perc_mask)
if rangelim:
#This is True for invalid values
range_mask = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)
if False:
cutoff = 150
rangelim = (0, 80)
low = (refdem < cutoff).data
range_mask[low] = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)[low]
demmask = (demmask | range_mask)
out = np.ma.array(dem, mask=demmask, fill_value=dem.fill_value)
return out | python | def dz_fltr_ma(dem, refdem, perc=None, rangelim=(0,30), smooth=False):
"""Absolute elevation difference range filter using values from a source array and a reference array
"""
if smooth:
refdem = gauss_fltr_astropy(refdem)
dem = gauss_fltr_astropy(dem)
dz = refdem - dem
#This is True for invalid values in DEM, and should be masked
demmask = np.ma.getmaskarray(dem)
if perc:
dz_perc = malib.calcperc(dz, perc)
print("Applying dz percentile filter (%s%%, %s%%): (%0.1f, %0.1f)" % (perc[0], perc[1], dz_perc[0], dz_perc[1]))
#This is True for invalid values
perc_mask = ((dz < dz_perc[0]) | (dz > dz_perc[1])).filled(False)
demmask = (demmask | perc_mask)
if rangelim:
#This is True for invalid values
range_mask = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)
if False:
cutoff = 150
rangelim = (0, 80)
low = (refdem < cutoff).data
range_mask[low] = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)[low]
demmask = (demmask | range_mask)
out = np.ma.array(dem, mask=demmask, fill_value=dem.fill_value)
return out | [
"def",
"dz_fltr_ma",
"(",
"dem",
",",
"refdem",
",",
"perc",
"=",
"None",
",",
"rangelim",
"=",
"(",
"0",
",",
"30",
")",
",",
"smooth",
"=",
"False",
")",
":",
"if",
"smooth",
":",
"refdem",
"=",
"gauss_fltr_astropy",
"(",
"refdem",
")",
"dem",
"=... | Absolute elevation difference range filter using values from a source array and a reference array | [
"Absolute",
"elevation",
"difference",
"range",
"filter",
"using",
"values",
"from",
"a",
"source",
"array",
"and",
"a",
"reference",
"array"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L411-L441 | train | 213,043 |
dshean/pygeotools | pygeotools/lib/filtlib.py | erode_edge | def erode_edge(dem, iterations=1):
"""Erode pixels near nodata
"""
import scipy.ndimage as ndimage
print('Eroding pixels near nodata: %i iterations' % iterations)
mask = np.ma.getmaskarray(dem)
mask_dilate = ndimage.morphology.binary_dilation(mask, iterations=iterations)
out = np.ma.array(dem, mask=mask_dilate)
return out | python | def erode_edge(dem, iterations=1):
"""Erode pixels near nodata
"""
import scipy.ndimage as ndimage
print('Eroding pixels near nodata: %i iterations' % iterations)
mask = np.ma.getmaskarray(dem)
mask_dilate = ndimage.morphology.binary_dilation(mask, iterations=iterations)
out = np.ma.array(dem, mask=mask_dilate)
return out | [
"def",
"erode_edge",
"(",
"dem",
",",
"iterations",
"=",
"1",
")",
":",
"import",
"scipy",
".",
"ndimage",
"as",
"ndimage",
"print",
"(",
"'Eroding pixels near nodata: %i iterations'",
"%",
"iterations",
")",
"mask",
"=",
"np",
".",
"ma",
".",
"getmaskarray",
... | Erode pixels near nodata | [
"Erode",
"pixels",
"near",
"nodata"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L462-L470 | train | 213,044 |
def butter(dt_list, val, lowpass=1.0):
    """This is framework for a butterworth bandpass for 1D data
    Needs to be cleaned up and generalized

    Separates a "tidal" band from a time series by applying a Butterworth
    high-pass filter (periods < ~1.8 days), then low-passing that result
    (periods > ~0.1 days) to denoise it.

    Parameters
    ----------
    dt_list : sequence of datetime
        Sample times; median spacing defines the sampling frequency
    val : array-like
        Sample values to filter
    lowpass : float
        NOTE(review): currently unused — confirm intended cutoff behavior

    NOTE(review): no value is returned; val_f_notide and
    val_f_tide_denoise are computed and discarded — confirm intended
    return value(s).
    """
    import scipy.signal
    import matplotlib.pyplot as plt
    #dt is 300 s, 5 min
    dt_diff = np.diff(dt_list)
    dt_diff = np.array([dt.total_seconds() for dt in dt_diff])
    #Median spacing is robust to gaps in the record
    dt = malib.fast_median(dt_diff)
    #f is 0.00333 Hz
    #288 samples/day
    fs = 1./dt
    nyq = fs/2.
    #Disabled diagnostic block: power spectral density plots
    if False:
        #psd, f = psd(z_msl, fs)
        sp_f, sp_psd = scipy.signal.periodogram(val, fs, detrend='linear')
        #sp_f, sp_psd = scipy.signal.welch(z_msl, fs, nperseg=2048)
        sp_f_days = 1./sp_f/86400.
        plt.figure()
        plt.plot(sp_f, sp_psd)
        plt.plot(sp_f_days, sp_psd)
        plt.semilogy(sp_f_days, sp_psd)
        plt.xlabel('Frequency')
        plt.ylabel('Power')
    print("Filtering tidal signal")
    #Define bandpass filter
    #Cutoffs are expressed as fractions of the Nyquist frequency
    #f_min = dt/(86400*0.25)
    f_max = (1./(86400*0.1)) / nyq
    f_min = (1./(86400*1.8)) / nyq
    order = 6
    b, a = scipy.signal.butter(order, f_min, btype='highpass')
    #b, a = sp.signal.butter(order, (f_min, f_max), btype='bandpass')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq/np.pi)*w
    w_f_days = 1/w_f/86400.
    #plt.figure()
    #plt.plot(w_f_days, np.abs(h))
    #filtfilt applies the filter forward and backward (zero phase shift)
    val_f_tide = scipy.signal.filtfilt(b, a, val)
    b, a = scipy.signal.butter(order, f_max, btype='lowpass')
    #b, a = sp.signal.butter(order, (f_min, f_max), btype='bandstop')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq/np.pi)*w
    w_f_days = 1/w_f/86400.
    #plt.plot(w_f_days, np.abs(h))
    val_f_tide_denoise = scipy.signal.filtfilt(b, a, val_f_tide)
    #val_f_notide = sp.signal.filtfilt(b, a, val)
    val_f_notide = val - val_f_tide
"def",
"butter",
"(",
"dt_list",
",",
"val",
",",
"lowpass",
"=",
"1.0",
")",
":",
"import",
"scipy",
".",
"signal",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"#dt is 300 s, 5 min",
"dt_diff",
"=",
"np",
".",
"diff",
"(",
"dt_list",
")",
"dt_di... | This is framework for a butterworth bandpass for 1D data
Needs to be cleaned up and generalized | [
"This",
"is",
"framework",
"for",
"a",
"butterworth",
"bandpass",
"for",
"1D",
"data",
"Needs",
"to",
"be",
"cleaned",
"up",
"and",
"generalized"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L509-L562 | train | 213,045 |
def freq_filt(bma):
    """
    This is a framework for 2D FFT filtering. It has not be tested or finished - might be a dead end
    See separate utility freq_analysis.py

    Intended flow: fill nodata with random noise, take a shifted 2D FFT,
    zero out frequencies outside a radial annulus, and inverse-transform.
    """
    """
    Want to fit linear function to artifact line in freq space,
    Then mask everything near that line at distances of ~5-200 pixels,
    Or whatever the maximum CCD artifact dimension happens to be,
    This will depend on scaling - consult CCD map for interval
    """
    #Fill ndv with random data
    #FFT requires a fully-populated array; noise avoids hard edges at nodata
    bf = malib.randomfill(bma)
    import scipy.fftpack
    f = scipy.fftpack.fft2(bf)
    #fftshift moves the zero-frequency component to the array center
    ff = scipy.fftpack.fftshift(f)
    #Ben suggested a Hahn filter here, remove the low frequency, high amplitude information
    #Then do a second fft?
    #np.log(np.abs(ff))
    #perc = malib.calcperc(np.real(ff), perc=(80, 95))
    #malib.iv(numpy.real(ff), clim=perc)
    #See http://scipy-lectures.github.io/advanced/image_processing/
    #Starting at a,b, compute argmax along vertical axis for restricted range
    #Fit line to the x and y argmax values
    #Mask [argmax[y]-1:argmax[y]+1]
    #Create radial mask
    ff_dim = np.array(ff.shape)
    #NOTE(review): under Python 3 this true-division yields floats; a, b and
    #the slice bounds below are later used as array indices — confirm
    #intended integer division (//)
    a,b = ff_dim/2
    n = ff_dim.max()
    y,x = np.ogrid[-a:n-a, -b:n-b]
    #Annulus radii in pixels (frequency-space)
    r1 = 40
    r2 = 60
    ff_mask = np.ma.make_mask(ff)
    radial_mask = (r1**2 <= x**2 + y**2) & (x**2 + y**2 < r2**2)
    #Note issues with rounding indices here
    #Hacked in +1 for testing
    ff_mask[:] = radial_mask[a-ff_dim[0]/2:a+ff_dim[0], b-ff_dim[1]/2:b+1+ff_dim[1]/2]
    #Combine radial and line mask
    #Convert mask to 0-1, then feather
    fm = ff * ff_mask
    #Inverse fft
    bf_filt = scipy.fftpack.ifft2(scipy.fftpack.ifftshift(fm))
    #Apply original mask
    #NOTE(review): no return statement — bf_filt is computed but discarded
    bf_filt = np.ma.masked_array(bf_filt, bma.mask)
"def",
"freq_filt",
"(",
"bma",
")",
":",
"\"\"\"\n Want to fit linear function to artifact line in freq space,\n Then mask everything near that line at distances of ~5-200 pixels, \n Or whatever the maximum CCD artifact dimension happens to be, \n This will depend on scaling - consult CCD m... | This is a framework for 2D FFT filtering. It has not be tested or finished - might be a dead end
See separate utility freq_analysis.py | [
"This",
"is",
"a",
"framework",
"for",
"2D",
"FFT",
"filtering",
".",
"It",
"has",
"not",
"be",
"tested",
"or",
"finished",
"-",
"might",
"be",
"a",
"dead",
"end"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L565-L622 | train | 213,046 |
def stack_smooth(s_orig, size=7, save=False):
    """Run Gaussian smoothing filter on exising stack object

    Returns a deep copy of the input stack with every layer smoothed;
    the original stack object is left untouched. Any derived products
    present on the input (stats, datestack, trend) are recomputed.
    """
    from copy import deepcopy
    from pygeotools.lib import filtlib
    print("Copying original DEMStack")
    s = deepcopy(s_orig)
    #Record the filter size in the new stack filename
    base = os.path.splitext(s_orig.stack_fn)[0]
    s.stack_fn = base + '_smooth%ipx.npz' % size
    print("Smoothing all arrays in stack with %i px gaussian filter" % size)
    n_layers = s.ma_stack.shape[0]
    for idx in range(n_layers):
        print('%i of %i' % (idx+1, n_layers))
        s.ma_stack[idx] = filtlib.gauss_fltr_astropy(s.ma_stack[idx], size=size)
    #Recompute derived products from the smoothed arrays
    if s.stats:
        s.compute_stats()
        if save:
            s.write_stats()
    #Update datestack
    if s.datestack and s.date_list_o.count() > 1:
        s.compute_dt_stats()
        if save:
            s.write_datestack()
    #Update trend
    if s.trend:
        s.compute_trend()
        if save:
            s.write_trend()
    if save:
        s.savestack()
    return s
"def",
"stack_smooth",
"(",
"s_orig",
",",
"size",
"=",
"7",
",",
"save",
"=",
"False",
")",
":",
"from",
"copy",
"import",
"deepcopy",
"from",
"pygeotools",
".",
"lib",
"import",
"filtlib",
"print",
"(",
"\"Copying original DEMStack\"",
")",
"s",
"=",
"de... | Run Gaussian smoothing filter on exising stack object | [
"Run",
"Gaussian",
"smoothing",
"filter",
"on",
"exising",
"stack",
"object"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L691-L722 | train | 213,047 |
def stack_clip(s_orig, extent, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object with limited extent from an exising stack object

    Parameters
    ----------
    s_orig : DEMStack
        Input stack object
    extent : sequence
        Desired output extent (xmin, ymin, xmax, ymax) in map coordinates;
        clipped to the input stack extent
    out_stack_fn : str or None
        Output stack filename; if None, a default name is generated
    copy : bool
        If True, deep-copy the input before clipping (input left untouched)
    save : bool
        If True, write the clipped stack and derived products to disk
        (skipped if it would overwrite the original stack file)

    Returns
    -------
    DEMStack
        Clipped stack containing only layers with valid data in the new extent
    """
    #Should check for valid extent
    #This is not memory efficient, but is much simpler
    #To be safe, if we are saving out, create a copy to avoid overwriting
    if copy or save:
        from copy import deepcopy
        print("Copying original DEMStack")
        s = deepcopy(s_orig)
    else:
        #Want to be very careful here, as we could overwrite the original file
        s = s_orig
    from pygeotools.lib import geolib
    gt = s.gt
    s_shape = s.ma_stack.shape[1:3]
    #Compute pixel bounds for input extent
    min_x_px, max_y_px = geolib.mapToPixel(extent[0], extent[1], gt)
    max_x_px, min_y_px = geolib.mapToPixel(extent[2], extent[3], gt)
    #Clip to stack extent and round to whole integers
    min_x_px = int(max(0, min_x_px)+0.5)
    max_x_px = int(min(s_shape[1], max_x_px)+0.5)
    min_y_px = int(max(0, min_y_px)+0.5)
    max_y_px = int(min(s_shape[0], max_y_px)+0.5)
    #Clip the stack
    x_slice = slice(min_x_px,max_x_px)
    y_slice = slice(min_y_px,max_y_px)
    s.ma_stack = s.ma_stack[:, y_slice, x_slice]
    #Now update geospatial info
    #This returns the pixel center in map coordinates
    #Want to remove 0.5 px offset for upper left corner in gt
    out_ul = geolib.pixelToMap(min_x_px - 0.5, min_y_px - 0.5, gt)
    #Update stack geotransform
    s.gt[0] = out_ul[0]
    s.gt[3] = out_ul[1]
    #Update new stack extent
    s.get_extent()
    #Check for and discard emtpy arrays
    #Might be faster to reshape then np.ma.count(s.ma_stack, axis=1)
    count_list = np.array([i.count() for i in s.ma_stack])
    idx = count_list > 0
    #Output subset with valid data in next extent
    #fn_list, source, error, error_dict_list, date_list, date_list_o
    #Note: no need to copy again
    s_sub = get_stack_subset(s, idx, out_stack_fn=out_stack_fn, copy=False, save=False)
    print("Orig filename:", s_orig.stack_fn)
    print("Orig extent:", s_orig.extent)
    print("Orig dimensions:", s_orig.ma_stack.shape)
    print("Input extent:", extent)
    print("New filename:", s_sub.stack_fn)
    print("New extent:", s_sub.extent)
    print("New dimensions:", s_sub.ma_stack.shape)
    if save:
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s_sub.stack_fn):
            print("Original stack would be overwritten!")
            print("Skipping save")
        else:
            s_sub.save = True
            s_sub.savestack()
    #The following should be unchanged by clip - it is more efficient to clip thes, but easier to regenerate
    #if s.stats:
    #stack_count, stack_mean, stack_min, stack_max, stack_std
    #s.stack_min = s.stack_min[y_slice, x_slice]
    #if s.datestack:
    #dt_ptp, dt_min, dt_max, dt_center
    #if s.med:
    #stack_med
    #if s.trend:
    #trend, intercept, detrended_std
    #Recompute stats/etc
    return s_sub
"def",
"stack_clip",
"(",
"s_orig",
",",
"extent",
",",
"out_stack_fn",
"=",
"None",
",",
"copy",
"=",
"True",
",",
"save",
"=",
"False",
")",
":",
"#Should check for valid extent",
"#This is not memory efficient, but is much simpler",
"#To be safe, if we are saving out, ... | Create a new stack object with limited extent from an exising stack object | [
"Create",
"a",
"new",
"stack",
"object",
"with",
"limited",
"extent",
"from",
"an",
"exising",
"stack",
"object"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L728-L811 | train | 213,048 |
def get_stack_subset(s_orig, idx, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object as a subset of an exising stack object

    Parameters
    ----------
    s_orig : DEMStack
        Input stack object
    idx : array-like of bool
        Boolean index selecting which layers to keep (length equals the
        number of layers in the stack)
    out_stack_fn : str or None
        Output stack filename; if None, a default name is generated
    copy : bool
        If True, deep-copy the input before subsetting (input left untouched)
    save : bool
        If True, write the subset stack and derived products to disk
        (disabled if the filename would overwrite the original)

    Returns
    -------
    DEMStack or None
        Subset stack, or None if idx selects no layers
    """
    #This must be a numpy boolean array
    idx = np.array(idx)
    if np.any(idx):
        #This is not memory efficient, but is much simpler
        #To be safe, if we are saving out, create a copy to avoid overwriting
        if copy or save:
            from copy import deepcopy
            print("Copying original DEMStack")
            s = deepcopy(s_orig)
        else:
            #Want to be very careful here, as we could overwrite the original file
            s = s_orig
        #All per-layer attributes must be filtered with the same idx to stay aligned
        #Update fn_list
        #Note: need to change fn_list to np.array - object array, allows longer strings
        #s.fn_list = s.fn_list[idx]
        print("Original stack: %i" % len(s_orig.fn_list))
        s.fn_list = (np.array(s.fn_list)[idx]).tolist()
        print("Filtered stack: %i" % len(s.fn_list))
        #Update date_lists
        s.date_list = s.date_list[idx]
        s.date_list_o = s.date_list_o[idx]
        #Update ma
        s.ma_stack = s.ma_stack[idx]
        #Update source/error
        #s.source = s.source[idx]
        s.source = (np.array(s.source)[idx]).tolist()
        s.error = s.error[idx]
        s.error_dict_list = np.array(s.error_dict_list)[idx]
        #Update stack_fn
        #out_stack_fn should be full path, with npz
        if out_stack_fn is None:
            s.stack_fn = None
            s.get_stack_fn()
        else:
            s.stack_fn = out_stack_fn
        #Check to make sure we are not going to overwrite
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s.stack_fn):
            print("Warning: new stack has identical filename: %s" % s.stack_fn)
            print("As a precaution, new stack will not be saved")
            save = False
        s.save = save
        #Update stats
        if s.stats:
            s.compute_stats()
            if save:
                s.write_stats()
        #Update datestack
        if s.datestack and s.date_list_o.count() > 1:
            s.compute_dt_stats()
            if save:
                s.write_datestack()
        #Update trend
        if s.trend:
            s.compute_trend()
            if save:
                s.write_trend()
        if save:
            s.savestack()
    else:
        print("No valid entries for input index array")
        s = None
    return s
"def",
"get_stack_subset",
"(",
"s_orig",
",",
"idx",
",",
"out_stack_fn",
"=",
"None",
",",
"copy",
"=",
"True",
",",
"save",
"=",
"False",
")",
":",
"#This must be a numpy boolean array",
"idx",
"=",
"np",
".",
"array",
"(",
"idx",
")",
"if",
"np",
"."... | Create a new stack object as a subset of an exising stack object | [
"Create",
"a",
"new",
"stack",
"object",
"as",
"a",
"subset",
"of",
"an",
"exising",
"stack",
"object"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L815-L879 | train | 213,049 |
def stack_merge(s1, s2, out_stack_fn=None, sort=True, save=False):
    """Merge two stack objects

    Both input stacks must share identical array dimensions, extent,
    resolution, and (implicitly) projection; exits via sys.exit otherwise.

    Parameters
    ----------
    s1, s2 : DEMStack
        Input stacks to combine (layers concatenated, s1 first)
    out_stack_fn : str or None
        Output stack filename; if None, a default is generated from s1's outdir
    sort : bool
        If True, sort merged layers by basename (intended as a date sort)
    save : bool
        If True, write the merged stack and derived products to disk

    Returns
    -------
    DEMStack
        Merged stack (a deep copy based on s1; inputs are left untouched)
    """
    from pygeotools.lib import geolib
    from copy import deepcopy
    #Assumes input stacks have identical extent, resolution, and projection
    if s1.ma_stack.shape[1:3] != s2.ma_stack.shape[1:3]:
        print(s1.ma_stack.shape)
        print(s2.ma_stack.shape)
        sys.exit('Input stacks must have identical array dimensions')
    if not geolib.extent_compare(s1.extent, s2.extent):
        print(s1.extent)
        print(s2.extent)
        sys.exit('Input stacks must have identical extent')
    if not geolib.res_compare(s1.res, s2.res):
        print(s1.res)
        print(s2.res)
        sys.exit('Input stacks must have identical res')
    print("\nCombining fn_list and ma_stack")
    fn_list = np.array(s1.fn_list + s2.fn_list)
    if sort:
        #Sort based on filenames (should be datesort)
        sort_idx = np.argsort([os.path.split(x)[-1] for x in fn_list])
    else:
        #Ellipsis indexing leaves order unchanged
        sort_idx = Ellipsis
    #Now pull out final, sorted order
    #All per-layer attributes are reordered with the same sort_idx to stay aligned
    fn_list = fn_list[sort_idx]
    ma_stack = np.ma.vstack((s1.ma_stack, s2.ma_stack))[sort_idx]
    #date_list = np.ma.dstack(s1.date_list, s2.date_list)
    #date_list_o = np.ma.dstack(s1.date_list_o, s2.date_list_o)
    source = np.array(s1.source + s2.source)[sort_idx]
    error = np.ma.concatenate([s1.error, s2.error])[sort_idx]
    #These are object arrays
    error_dict_list = np.concatenate([s1.error_dict_list, s2.error_dict_list])[sort_idx]
    print("Creating copy for new stack")
    s = deepcopy(s1)
    s.fn_list = list(fn_list)
    s.ma_stack = ma_stack
    s.source = list(source)
    s.error = error
    s.error_dict_list = error_dict_list
    #This will use original stack outdir
    if not out_stack_fn:
        s.get_stack_fn()
    else:
        s.stack_fn = out_stack_fn
    #date_list/date_list_o are regenerated from the merged fn_list
    s.get_date_list()
    #These will preserve trend from one stack if present in only one stack
    #Useful when combining surface topo and bed topo
    if s1.datestack and s2.datestack:
        s.compute_dt_stats()
    if save and s1.datestack:
        s.write_datestack()
    if s1.stats and s2.stats:
        s.compute_stats()
    if save and s1.stats:
        s.write_stats()
    if s1.trend and s2.trend:
        s.compute_trend()
    if save and s1.trend:
        s.write_trend()
    if save:
        s.savestack()
    return s
"def",
"stack_merge",
"(",
"s1",
",",
"s2",
",",
"out_stack_fn",
"=",
"None",
",",
"sort",
"=",
"True",
",",
"save",
"=",
"False",
")",
":",
"from",
"pygeotools",
".",
"lib",
"import",
"geolib",
"from",
"copy",
"import",
"deepcopy",
"#Assumes input stacks ... | Merge two stack objects | [
"Merge",
"two",
"stack",
"objects"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L882-L953 | train | 213,050 |
def randomfill(a):
    """Fill masked areas with random noise
    This is needed for any fft-based operations
    """
    a = checkma(a)
    #For data that have already been normalized,
    #This provides a proper normal distribution with mean=0 and std=1
    #a = (a - a.mean()) / a.std()
    #noise = a.mask * (np.random.randn(*a.shape))
    #Draw gaussian noise matching the valid data's mean/std, then zero it
    #everywhere except the masked cells
    gauss = np.random.normal(a.mean(), a.std(), a.shape)
    masked_noise = a.mask * gauss
    #Valid cells keep their values (mask filled with 0), masked cells get noise
    return a.filled(0) + masked_noise
"def",
"randomfill",
"(",
"a",
")",
":",
"a",
"=",
"checkma",
"(",
"a",
")",
"#For data that have already been normalized,",
"#This provides a proper normal distribution with mean=0 and std=1",
"#a = (a - a.mean()) / a.std()",
"#noise = a.mask * (np.random.randn(*a.shape))",
"noise",... | Fill masked areas with random noise
This is needed for any fft-based operations | [
"Fill",
"masked",
"areas",
"with",
"random",
"noise",
"This",
"is",
"needed",
"for",
"any",
"fft",
"-",
"based",
"operations"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1459-L1472 | train | 213,051 |
def nanfill(a, f_a, *args, **kwargs):
    """Fill masked areas with np.nan
    Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
    This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
    """
    a = checkma(a)
    #Preserve the original nodata value for the output
    ndv = a.fill_value
    #Note: filling with np.nan fails for non-float dtypes (np.nan is float)
    nan_filled = a.filled(np.nan)
    result = f_a(nan_filled, *args, **kwargs)
    #Remask any nan/inf produced by f_a
    #the fix_invalid fill_value parameter doesn't seem to work
    out = np.ma.fix_invalid(result, copy=False)
    out.set_fill_value(ndv)
    return out
"def",
"nanfill",
"(",
"a",
",",
"f_a",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"a",
"=",
"checkma",
"(",
"a",
")",
"ndv",
"=",
"a",
".",
"fill_value",
"#Note: The following fails for arrays that are not float (np.nan is float)",
"b",
"=",
"f_a"... | Fill masked areas with np.nan
Wrapper for functions that can't handle ma (e.g. scipy.ndimage)
This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155 | [
"Fill",
"masked",
"areas",
"with",
"np",
".",
"nan"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1474-L1488 | train | 213,052 |
def fast_median(a):
    """Fast median operation for masked array using 50th-percentile
    """
    a = checkma(a)
    #return scoreatpercentile(a.compressed(), 50)
    #Fully-masked input has no median
    if a.count() == 0:
        return np.ma.masked
    return np.percentile(a.compressed(), 50)
"def",
"fast_median",
"(",
"a",
")",
":",
"a",
"=",
"checkma",
"(",
"a",
")",
"#return scoreatpercentile(a.compressed(), 50)",
"if",
"a",
".",
"count",
"(",
")",
">",
"0",
":",
"out",
"=",
"np",
".",
"percentile",
"(",
"a",
".",
"compressed",
"(",
")",... | Fast median operation for masked array using 50th-percentile | [
"Fast",
"median",
"operation",
"for",
"masked",
"array",
"using",
"50th",
"-",
"percentile"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1494-L1503 | train | 213,053 |
def mad(a, axis=None, c=1.4826, return_med=False):
    """Compute normalized median absolute difference

    Can also return median array, as this can be expensive, and often we want both med and nmad
    Note: 1.4826 = 1/0.6745 (scale factor so NMAD is comparable to the
    standard deviation for normally distributed data)

    Parameters
    ----------
    a : array-like
        Input array (converted to a masked array via checkma)
    axis : int or None
        Axis along which to compute; None uses the flattened array
    c : float
        Normalization constant
    return_med : bool
        If True, return a (nmad, med) tuple instead of nmad alone

    Returns
    -------
    NMAD value/array, or (nmad, med) when return_med=True; np.ma.masked
    entries for fully-masked input
    """
    a = checkma(a)
    #return np.ma.median(np.fabs(a - np.ma.median(a))) / c
    if a.count() > 0:
        if axis is None:
            med = fast_median(a)
            out = fast_median(np.fabs(a - med)) * c
        else:
            med = np.ma.median(a, axis=axis)
            #This is necessary for broadcasting med against a
            med = np.expand_dims(med, axis=axis)
            out = np.ma.median(np.ma.fabs(a - med), axis=axis) * c
    else:
        #Bug fix: med was previously left undefined on this branch, raising
        #NameError when return_med=True for a fully-masked input
        med = np.ma.masked
        out = np.ma.masked
    if return_med:
        out = (out, med)
    return out
"def",
"mad",
"(",
"a",
",",
"axis",
"=",
"None",
",",
"c",
"=",
"1.4826",
",",
"return_med",
"=",
"False",
")",
":",
"a",
"=",
"checkma",
"(",
"a",
")",
"#return np.ma.median(np.fabs(a - np.ma.median(a))) / c",
"if",
"a",
".",
"count",
"(",
")",
">",
... | Compute normalized median absolute difference
Can also return median array, as this can be expensive, and often we want both med and nmad
Note: 1.4826 = 1/0.6745 | [
"Compute",
"normalized",
"median",
"absolute",
"difference",
"Can",
"also",
"return",
"median",
"array",
"as",
"this",
"can",
"be",
"expensive",
"and",
"often",
"we",
"want",
"both",
"med",
"and",
"nmad"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1505-L1527 | train | 213,054 |
def calcperc(b, perc=(0.1,99.9)):
    """Calculate values at specified percentiles
    """
    b = checkma(b)
    #Fully-masked input: fall back to (0, 0)
    if b.count() == 0:
        return 0, 0
    #low = scoreatpercentile(b.compressed(), perc[0])
    #high = scoreatpercentile(b.compressed(), perc[1])
    valid = b.compressed()
    low = np.percentile(valid, perc[0])
    high = np.percentile(valid, perc[1])
    #low = scipy.stats.mstats.scoreatpercentile(b, perc[0])
    #high = scipy.stats.mstats.scoreatpercentile(b, perc[1])
    #This approach can be used for unmasked array, but values less than 0 are problematic
    #bma_low = b.min()
    #bma_high = b.max()
    #low = scipy.stats.scoreatpercentile(b.data.flatten(), perc[0], (bma_low, bma_high))
    #high = scipy.stats.scoreatpercentile(b.data.flatten(), perc[1], (bma_low, bma_high))
    return low, high
"def",
"calcperc",
"(",
"b",
",",
"perc",
"=",
"(",
"0.1",
",",
"99.9",
")",
")",
":",
"b",
"=",
"checkma",
"(",
"b",
")",
"if",
"b",
".",
"count",
"(",
")",
">",
"0",
":",
"#low = scoreatpercentile(b.compressed(), perc[0])",
"#high = scoreatpercentile(b.c... | Calculate values at specified percentiles | [
"Calculate",
"values",
"at",
"specified",
"percentiles"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1530-L1551 | train | 213,055 |
dshean/pygeotools | pygeotools/lib/malib.py | calcperc_sym | def calcperc_sym(b, perc=(0.1,99.9)):
"""
Get symmetrical percentile values
Useful for determining clim centered on 0 for difference maps
"""
clim = np.max(np.abs(calcperc(b, perc)))
#clim = (-clim, clim)
return -clim, clim | python | def calcperc_sym(b, perc=(0.1,99.9)):
"""
Get symmetrical percentile values
Useful for determining clim centered on 0 for difference maps
"""
clim = np.max(np.abs(calcperc(b, perc)))
#clim = (-clim, clim)
return -clim, clim | [
"def",
"calcperc_sym",
"(",
"b",
",",
"perc",
"=",
"(",
"0.1",
",",
"99.9",
")",
")",
":",
"clim",
"=",
"np",
".",
"max",
"(",
"np",
".",
"abs",
"(",
"calcperc",
"(",
"b",
",",
"perc",
")",
")",
")",
"#clim = (-clim, clim)",
"return",
"-",
"clim"... | Get symmetrical percentile values
Useful for determining clim centered on 0 for difference maps | [
"Get",
"symmetrical",
"percentile",
"values",
"Useful",
"for",
"determining",
"clim",
"centered",
"on",
"0",
"for",
"difference",
"maps"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1553-L1560 | train | 213,056 |
dshean/pygeotools | pygeotools/lib/malib.py | iqr | def iqr(b, perc=(25, 75)):
"""Inter-quartile range
"""
b = checkma(b)
low, high = calcperc(b, perc)
return low, high, high - low | python | def iqr(b, perc=(25, 75)):
"""Inter-quartile range
"""
b = checkma(b)
low, high = calcperc(b, perc)
return low, high, high - low | [
"def",
"iqr",
"(",
"b",
",",
"perc",
"=",
"(",
"25",
",",
"75",
")",
")",
":",
"b",
"=",
"checkma",
"(",
"b",
")",
"low",
",",
"high",
"=",
"calcperc",
"(",
"b",
",",
"perc",
")",
"return",
"low",
",",
"high",
",",
"high",
"-",
"low"
] | Inter-quartile range | [
"Inter",
"-",
"quartile",
"range"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1562-L1567 | train | 213,057 |
dshean/pygeotools | pygeotools/lib/malib.py | iv | def iv(b, **kwargs):
"""Quick access to imview for interactive sessions
"""
import matplotlib.pyplot as plt
import imview.imviewer as imview
b = checkma(b)
#if hasattr(kwargs,'imshow_kwargs'):
# kwargs['imshow_kwargs']['interpolation'] = 'bicubic'
#else:
# kwargs['imshow_kwargs'] = {'interpolation': 'bicubic'}
#bma_fig(fig, bma, cmap='gist_rainbow_r', clim=None, bg=None, n_subplt=1, subplt=1, label=None, **imshow_kwargs)
fig = plt.figure()
imview.bma_fig(fig, b, **kwargs)
plt.show()
return fig | python | def iv(b, **kwargs):
"""Quick access to imview for interactive sessions
"""
import matplotlib.pyplot as plt
import imview.imviewer as imview
b = checkma(b)
#if hasattr(kwargs,'imshow_kwargs'):
# kwargs['imshow_kwargs']['interpolation'] = 'bicubic'
#else:
# kwargs['imshow_kwargs'] = {'interpolation': 'bicubic'}
#bma_fig(fig, bma, cmap='gist_rainbow_r', clim=None, bg=None, n_subplt=1, subplt=1, label=None, **imshow_kwargs)
fig = plt.figure()
imview.bma_fig(fig, b, **kwargs)
plt.show()
return fig | [
"def",
"iv",
"(",
"b",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"imview",
".",
"imviewer",
"as",
"imview",
"b",
"=",
"checkma",
"(",
"b",
")",
"#if hasattr(kwargs,'imshow_kwargs'):",
"# kwargs['imsho... | Quick access to imview for interactive sessions | [
"Quick",
"access",
"to",
"imview",
"for",
"interactive",
"sessions"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1688-L1702 | train | 213,058 |
dshean/pygeotools | pygeotools/lib/malib.py | norm_shape | def norm_shape(shape):
'''
Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional shapes.
Parameters
shape - an int, or a tuple of ints
Returns
a shape tuple
'''
try:
i = int(shape)
return (i,)
except TypeError:
# shape was not a number
pass
try:
t = tuple(shape)
return t
except TypeError:
# shape was not iterable
pass
raise TypeError('shape must be an int, or a tuple of ints') | python | def norm_shape(shape):
'''
Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional shapes.
Parameters
shape - an int, or a tuple of ints
Returns
a shape tuple
'''
try:
i = int(shape)
return (i,)
except TypeError:
# shape was not a number
pass
try:
t = tuple(shape)
return t
except TypeError:
# shape was not iterable
pass
raise TypeError('shape must be an int, or a tuple of ints') | [
"def",
"norm_shape",
"(",
"shape",
")",
":",
"try",
":",
"i",
"=",
"int",
"(",
"shape",
")",
"return",
"(",
"i",
",",
")",
"except",
"TypeError",
":",
"# shape was not a number",
"pass",
"try",
":",
"t",
"=",
"tuple",
"(",
"shape",
")",
"return",
"t"... | Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional shapes.
Parameters
shape - an int, or a tuple of ints
Returns
a shape tuple | [
"Normalize",
"numpy",
"array",
"shapes",
"so",
"they",
"re",
"always",
"expressed",
"as",
"a",
"tuple",
"even",
"for",
"one",
"-",
"dimensional",
"shapes",
".",
"Parameters",
"shape",
"-",
"an",
"int",
"or",
"a",
"tuple",
"of",
"ints",
"Returns",
"a",
"s... | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1954-L1979 | train | 213,059 |
dshean/pygeotools | pygeotools/lib/geolib.py | localortho | def localortho(lon, lat):
"""Create srs for local orthographic projection centered at lat, lon
"""
local_srs = osr.SpatialReference()
local_proj = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
local_srs.ImportFromProj4(local_proj)
return local_srs | python | def localortho(lon, lat):
"""Create srs for local orthographic projection centered at lat, lon
"""
local_srs = osr.SpatialReference()
local_proj = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
local_srs.ImportFromProj4(local_proj)
return local_srs | [
"def",
"localortho",
"(",
"lon",
",",
"lat",
")",
":",
"local_srs",
"=",
"osr",
".",
"SpatialReference",
"(",
")",
"local_proj",
"=",
"'+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs '",
"%",
"(",
"lat",
",",
"lon",
")",
"local_srs",
".",
"Im... | Create srs for local orthographic projection centered at lat, lon | [
"Create",
"srs",
"for",
"local",
"orthographic",
"projection",
"centered",
"at",
"lat",
"lon"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L235-L241 | train | 213,060 |
dshean/pygeotools | pygeotools/lib/geolib.py | geom2localortho | def geom2localortho(geom):
"""Convert existing geom to local orthographic projection
Useful for local cartesian distance/area calculations
"""
cx, cy = geom.Centroid().GetPoint_2D()
lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs)
local_srs = localortho(lon,lat)
local_geom = geom_dup(geom)
geom_transform(local_geom, local_srs)
return local_geom | python | def geom2localortho(geom):
"""Convert existing geom to local orthographic projection
Useful for local cartesian distance/area calculations
"""
cx, cy = geom.Centroid().GetPoint_2D()
lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs)
local_srs = localortho(lon,lat)
local_geom = geom_dup(geom)
geom_transform(local_geom, local_srs)
return local_geom | [
"def",
"geom2localortho",
"(",
"geom",
")",
":",
"cx",
",",
"cy",
"=",
"geom",
".",
"Centroid",
"(",
")",
".",
"GetPoint_2D",
"(",
")",
"lon",
",",
"lat",
",",
"z",
"=",
"cT_helper",
"(",
"cx",
",",
"cy",
",",
"0",
",",
"geom",
".",
"GetSpatialRe... | Convert existing geom to local orthographic projection
Useful for local cartesian distance/area calculations | [
"Convert",
"existing",
"geom",
"to",
"local",
"orthographic",
"projection"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L244-L254 | train | 213,061 |
dshean/pygeotools | pygeotools/lib/geolib.py | dd2dms | def dd2dms(dd):
"""Convert decimal degrees to degrees, minutes, seconds
"""
n = dd < 0
dd = abs(dd)
m,s = divmod(dd*3600,60)
d,m = divmod(m,60)
if n:
d = -d
return d,m,s | python | def dd2dms(dd):
"""Convert decimal degrees to degrees, minutes, seconds
"""
n = dd < 0
dd = abs(dd)
m,s = divmod(dd*3600,60)
d,m = divmod(m,60)
if n:
d = -d
return d,m,s | [
"def",
"dd2dms",
"(",
"dd",
")",
":",
"n",
"=",
"dd",
"<",
"0",
"dd",
"=",
"abs",
"(",
"dd",
")",
"m",
",",
"s",
"=",
"divmod",
"(",
"dd",
"*",
"3600",
",",
"60",
")",
"d",
",",
"m",
"=",
"divmod",
"(",
"m",
",",
"60",
")",
"if",
"n",
... | Convert decimal degrees to degrees, minutes, seconds | [
"Convert",
"decimal",
"degrees",
"to",
"degrees",
"minutes",
"seconds"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L353-L362 | train | 213,062 |
dshean/pygeotools | pygeotools/lib/geolib.py | dms2dd | def dms2dd(d,m,s):
"""Convert degrees, minutes, seconds to decimal degrees
"""
if d < 0:
sign = -1
else:
sign = 1
dd = sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
return dd | python | def dms2dd(d,m,s):
"""Convert degrees, minutes, seconds to decimal degrees
"""
if d < 0:
sign = -1
else:
sign = 1
dd = sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
return dd | [
"def",
"dms2dd",
"(",
"d",
",",
"m",
",",
"s",
")",
":",
"if",
"d",
"<",
"0",
":",
"sign",
"=",
"-",
"1",
"else",
":",
"sign",
"=",
"1",
"dd",
"=",
"sign",
"*",
"(",
"int",
"(",
"abs",
"(",
"d",
")",
")",
"+",
"float",
"(",
"m",
")",
... | Convert degrees, minutes, seconds to decimal degrees | [
"Convert",
"degrees",
"minutes",
"seconds",
"to",
"decimal",
"degrees"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L364-L372 | train | 213,063 |
dshean/pygeotools | pygeotools/lib/geolib.py | dd2dm | def dd2dm(dd):
"""Convert decimal to degrees, decimal minutes
"""
d,m,s = dd2dms(dd)
m = m + float(s)/3600
return d,m,s | python | def dd2dm(dd):
"""Convert decimal to degrees, decimal minutes
"""
d,m,s = dd2dms(dd)
m = m + float(s)/3600
return d,m,s | [
"def",
"dd2dm",
"(",
"dd",
")",
":",
"d",
",",
"m",
",",
"s",
"=",
"dd2dms",
"(",
"dd",
")",
"m",
"=",
"m",
"+",
"float",
"(",
"s",
")",
"/",
"3600",
"return",
"d",
",",
"m",
",",
"s"
] | Convert decimal to degrees, decimal minutes | [
"Convert",
"decimal",
"to",
"degrees",
"decimal",
"minutes"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L397-L402 | train | 213,064 |
dshean/pygeotools | pygeotools/lib/geolib.py | mapToPixel | def mapToPixel(mX, mY, geoTransform):
"""Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
mX = np.asarray(mX)
mY = np.asarray(mY)
if geoTransform[2] + geoTransform[4] == 0:
pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5
pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5
else:
pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
#return int(pX), int(pY)
return pX, pY | python | def mapToPixel(mX, mY, geoTransform):
"""Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
mX = np.asarray(mX)
mY = np.asarray(mY)
if geoTransform[2] + geoTransform[4] == 0:
pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5
pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5
else:
pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
#return int(pX), int(pY)
return pX, pY | [
"def",
"mapToPixel",
"(",
"mX",
",",
"mY",
",",
"geoTransform",
")",
":",
"mX",
"=",
"np",
".",
"asarray",
"(",
"mX",
")",
"mY",
"=",
"np",
".",
"asarray",
"(",
"mY",
")",
"if",
"geoTransform",
"[",
"2",
"]",
"+",
"geoTransform",
"[",
"4",
"]",
... | Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform) | [
"Convert",
"map",
"coordinates",
"to",
"pixel",
"coordinates",
"based",
"on",
"geotransform",
"Accepts",
"float",
"or",
"NumPy",
"arrays"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L404-L419 | train | 213,065 |
dshean/pygeotools | pygeotools/lib/geolib.py | pixelToMap | def pixelToMap(pX, pY, geoTransform):
"""Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
pX = np.asarray(pX, dtype=float)
pY = np.asarray(pY, dtype=float)
pX += 0.5
pY += 0.5
mX, mY = applyGeoTransform(pX, pY, geoTransform)
return mX, mY | python | def pixelToMap(pX, pY, geoTransform):
"""Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
pX = np.asarray(pX, dtype=float)
pY = np.asarray(pY, dtype=float)
pX += 0.5
pY += 0.5
mX, mY = applyGeoTransform(pX, pY, geoTransform)
return mX, mY | [
"def",
"pixelToMap",
"(",
"pX",
",",
"pY",
",",
"geoTransform",
")",
":",
"pX",
"=",
"np",
".",
"asarray",
"(",
"pX",
",",
"dtype",
"=",
"float",
")",
"pY",
"=",
"np",
".",
"asarray",
"(",
"pY",
",",
"dtype",
"=",
"float",
")",
"pX",
"+=",
"0.5... | Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform) | [
"Convert",
"pixel",
"coordinates",
"to",
"map",
"coordinates",
"based",
"on",
"geotransform",
"Accepts",
"float",
"or",
"NumPy",
"arrays"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L422-L434 | train | 213,066 |
dshean/pygeotools | pygeotools/lib/geolib.py | mem_ds | def mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32):
"""Create a new GDAL Dataset in memory
Useful for various applications that require a Dataset
"""
#These round down to int
#dst_ns = int((extent[2] - extent[0])/res)
#dst_nl = int((extent[3] - extent[1])/res)
#This should pad by 1 pixel, but not if extent and res were calculated together to give whole int
dst_ns = int((extent[2] - extent[0])/res + 0.99)
dst_nl = int((extent[3] - extent[1])/res + 0.99)
m_ds = gdal.GetDriverByName('MEM').Create('', dst_ns, dst_nl, 1, dtype)
m_gt = [extent[0], res, 0, extent[3], 0, -res]
m_ds.SetGeoTransform(m_gt)
if srs is not None:
m_ds.SetProjection(srs.ExportToWkt())
return m_ds | python | def mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32):
"""Create a new GDAL Dataset in memory
Useful for various applications that require a Dataset
"""
#These round down to int
#dst_ns = int((extent[2] - extent[0])/res)
#dst_nl = int((extent[3] - extent[1])/res)
#This should pad by 1 pixel, but not if extent and res were calculated together to give whole int
dst_ns = int((extent[2] - extent[0])/res + 0.99)
dst_nl = int((extent[3] - extent[1])/res + 0.99)
m_ds = gdal.GetDriverByName('MEM').Create('', dst_ns, dst_nl, 1, dtype)
m_gt = [extent[0], res, 0, extent[3], 0, -res]
m_ds.SetGeoTransform(m_gt)
if srs is not None:
m_ds.SetProjection(srs.ExportToWkt())
return m_ds | [
"def",
"mem_ds",
"(",
"res",
",",
"extent",
",",
"srs",
"=",
"None",
",",
"dtype",
"=",
"gdal",
".",
"GDT_Float32",
")",
":",
"#These round down to int",
"#dst_ns = int((extent[2] - extent[0])/res)",
"#dst_nl = int((extent[3] - extent[1])/res)",
"#This should pad by 1 pixel... | Create a new GDAL Dataset in memory
Useful for various applications that require a Dataset | [
"Create",
"a",
"new",
"GDAL",
"Dataset",
"in",
"memory"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L596-L612 | train | 213,067 |
dshean/pygeotools | pygeotools/lib/geolib.py | copyproj | def copyproj(src_fn, dst_fn, gt=True):
"""Copy projection and geotransform from one raster file to another
"""
src_ds = gdal.Open(src_fn, gdal.GA_ReadOnly)
dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
dst_ds.SetProjection(src_ds.GetProjection())
if gt:
src_gt = np.array(src_ds.GetGeoTransform())
src_dim = np.array([src_ds.RasterXSize, src_ds.RasterYSize])
dst_dim = np.array([dst_ds.RasterXSize, dst_ds.RasterYSize])
#This preserves dst_fn resolution
if np.any(src_dim != dst_dim):
res_factor = src_dim/dst_dim.astype(float)
src_gt[[1, 5]] *= max(res_factor)
#src_gt[[1, 5]] *= min(res_factor)
#src_gt[[1, 5]] *= res_factor
dst_ds.SetGeoTransform(src_gt)
src_ds = None
dst_ds = None | python | def copyproj(src_fn, dst_fn, gt=True):
"""Copy projection and geotransform from one raster file to another
"""
src_ds = gdal.Open(src_fn, gdal.GA_ReadOnly)
dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
dst_ds.SetProjection(src_ds.GetProjection())
if gt:
src_gt = np.array(src_ds.GetGeoTransform())
src_dim = np.array([src_ds.RasterXSize, src_ds.RasterYSize])
dst_dim = np.array([dst_ds.RasterXSize, dst_ds.RasterYSize])
#This preserves dst_fn resolution
if np.any(src_dim != dst_dim):
res_factor = src_dim/dst_dim.astype(float)
src_gt[[1, 5]] *= max(res_factor)
#src_gt[[1, 5]] *= min(res_factor)
#src_gt[[1, 5]] *= res_factor
dst_ds.SetGeoTransform(src_gt)
src_ds = None
dst_ds = None | [
"def",
"copyproj",
"(",
"src_fn",
",",
"dst_fn",
",",
"gt",
"=",
"True",
")",
":",
"src_ds",
"=",
"gdal",
".",
"Open",
"(",
"src_fn",
",",
"gdal",
".",
"GA_ReadOnly",
")",
"dst_ds",
"=",
"gdal",
".",
"Open",
"(",
"dst_fn",
",",
"gdal",
".",
"GA_Upd... | Copy projection and geotransform from one raster file to another | [
"Copy",
"projection",
"and",
"geotransform",
"from",
"one",
"raster",
"file",
"to",
"another"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L615-L633 | train | 213,068 |
dshean/pygeotools | pygeotools/lib/geolib.py | geom_transform | def geom_transform(geom, t_srs):
"""Transform a geometry in place
"""
s_srs = geom.GetSpatialReference()
if not s_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(s_srs, t_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs) | python | def geom_transform(geom, t_srs):
"""Transform a geometry in place
"""
s_srs = geom.GetSpatialReference()
if not s_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(s_srs, t_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs) | [
"def",
"geom_transform",
"(",
"geom",
",",
"t_srs",
")",
":",
"s_srs",
"=",
"geom",
".",
"GetSpatialReference",
"(",
")",
"if",
"not",
"s_srs",
".",
"IsSame",
"(",
"t_srs",
")",
":",
"ct",
"=",
"osr",
".",
"CoordinateTransformation",
"(",
"s_srs",
",",
... | Transform a geometry in place | [
"Transform",
"a",
"geometry",
"in",
"place"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L647-L654 | train | 213,069 |
dshean/pygeotools | pygeotools/lib/geolib.py | shp_dict | def shp_dict(shp_fn, fields=None, geom=True):
"""Get a dictionary for all features in a shapefile
Optionally, specify fields
"""
from pygeotools.lib import timelib
ds = ogr.Open(shp_fn)
lyr = ds.GetLayer()
nfeat = lyr.GetFeatureCount()
print('%i input features\n' % nfeat)
if fields is None:
fields = shp_fieldnames(lyr)
d_list = []
for n,feat in enumerate(lyr):
d = {}
if geom:
geom = feat.GetGeometryRef()
d['geom'] = geom
for f_name in fields:
i = str(feat.GetField(f_name))
if 'date' in f_name:
# date_f = f_name
#If d is float, clear off decimal
i = i.rsplit('.')[0]
i = timelib.strptime_fuzzy(str(i))
d[f_name] = i
d_list.append(d)
#d_list_sort = sorted(d_list, key=lambda k: k[date_f])
return d_list | python | def shp_dict(shp_fn, fields=None, geom=True):
"""Get a dictionary for all features in a shapefile
Optionally, specify fields
"""
from pygeotools.lib import timelib
ds = ogr.Open(shp_fn)
lyr = ds.GetLayer()
nfeat = lyr.GetFeatureCount()
print('%i input features\n' % nfeat)
if fields is None:
fields = shp_fieldnames(lyr)
d_list = []
for n,feat in enumerate(lyr):
d = {}
if geom:
geom = feat.GetGeometryRef()
d['geom'] = geom
for f_name in fields:
i = str(feat.GetField(f_name))
if 'date' in f_name:
# date_f = f_name
#If d is float, clear off decimal
i = i.rsplit('.')[0]
i = timelib.strptime_fuzzy(str(i))
d[f_name] = i
d_list.append(d)
#d_list_sort = sorted(d_list, key=lambda k: k[date_f])
return d_list | [
"def",
"shp_dict",
"(",
"shp_fn",
",",
"fields",
"=",
"None",
",",
"geom",
"=",
"True",
")",
":",
"from",
"pygeotools",
".",
"lib",
"import",
"timelib",
"ds",
"=",
"ogr",
".",
"Open",
"(",
"shp_fn",
")",
"lyr",
"=",
"ds",
".",
"GetLayer",
"(",
")",... | Get a dictionary for all features in a shapefile
Optionally, specify fields | [
"Get",
"a",
"dictionary",
"for",
"all",
"features",
"in",
"a",
"shapefile",
"Optionally",
"specify",
"fields"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L663-L691 | train | 213,070 |
dshean/pygeotools | pygeotools/lib/geolib.py | lyr_proj | def lyr_proj(lyr, t_srs, preserve_fields=True):
"""Reproject an OGR layer
"""
#Need to check t_srs
s_srs = lyr.GetSpatialRef()
cT = osr.CoordinateTransformation(s_srs, t_srs)
#Do everything in memory
drv = ogr.GetDriverByName('Memory')
#Might want to save clipped, warped shp to disk?
# create the output layer
#drv = ogr.GetDriverByName('ESRI Shapefile')
#out_fn = '/tmp/temp.shp'
#if os.path.exists(out_fn):
# driver.DeleteDataSource(out_fn)
#out_ds = driver.CreateDataSource(out_fn)
out_ds = drv.CreateDataSource('out')
outlyr = out_ds.CreateLayer('out', srs=t_srs, geom_type=lyr.GetGeomType())
if preserve_fields:
# add fields
inLayerDefn = lyr.GetLayerDefn()
for i in range(0, inLayerDefn.GetFieldCount()):
fieldDefn = inLayerDefn.GetFieldDefn(i)
outlyr.CreateField(fieldDefn)
# get the output layer's feature definition
outLayerDefn = outlyr.GetLayerDefn()
# loop through the input features
inFeature = lyr.GetNextFeature()
while inFeature:
# get the input geometry
geom = inFeature.GetGeometryRef()
# reproject the geometry
geom.Transform(cT)
# create a new feature
outFeature = ogr.Feature(outLayerDefn)
# set the geometry and attribute
outFeature.SetGeometry(geom)
if preserve_fields:
for i in range(0, outLayerDefn.GetFieldCount()):
outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
# add the feature to the shapefile
outlyr.CreateFeature(outFeature)
# destroy the features and get the next input feature
inFeature = lyr.GetNextFeature()
#NOTE: have to operate on ds here rather than lyr, otherwise segfault
return out_ds | python | def lyr_proj(lyr, t_srs, preserve_fields=True):
"""Reproject an OGR layer
"""
#Need to check t_srs
s_srs = lyr.GetSpatialRef()
cT = osr.CoordinateTransformation(s_srs, t_srs)
#Do everything in memory
drv = ogr.GetDriverByName('Memory')
#Might want to save clipped, warped shp to disk?
# create the output layer
#drv = ogr.GetDriverByName('ESRI Shapefile')
#out_fn = '/tmp/temp.shp'
#if os.path.exists(out_fn):
# driver.DeleteDataSource(out_fn)
#out_ds = driver.CreateDataSource(out_fn)
out_ds = drv.CreateDataSource('out')
outlyr = out_ds.CreateLayer('out', srs=t_srs, geom_type=lyr.GetGeomType())
if preserve_fields:
# add fields
inLayerDefn = lyr.GetLayerDefn()
for i in range(0, inLayerDefn.GetFieldCount()):
fieldDefn = inLayerDefn.GetFieldDefn(i)
outlyr.CreateField(fieldDefn)
# get the output layer's feature definition
outLayerDefn = outlyr.GetLayerDefn()
# loop through the input features
inFeature = lyr.GetNextFeature()
while inFeature:
# get the input geometry
geom = inFeature.GetGeometryRef()
# reproject the geometry
geom.Transform(cT)
# create a new feature
outFeature = ogr.Feature(outLayerDefn)
# set the geometry and attribute
outFeature.SetGeometry(geom)
if preserve_fields:
for i in range(0, outLayerDefn.GetFieldCount()):
outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
# add the feature to the shapefile
outlyr.CreateFeature(outFeature)
# destroy the features and get the next input feature
inFeature = lyr.GetNextFeature()
#NOTE: have to operate on ds here rather than lyr, otherwise segfault
return out_ds | [
"def",
"lyr_proj",
"(",
"lyr",
",",
"t_srs",
",",
"preserve_fields",
"=",
"True",
")",
":",
"#Need to check t_srs",
"s_srs",
"=",
"lyr",
".",
"GetSpatialRef",
"(",
")",
"cT",
"=",
"osr",
".",
"CoordinateTransformation",
"(",
"s_srs",
",",
"t_srs",
")",
"#D... | Reproject an OGR layer | [
"Reproject",
"an",
"OGR",
"layer"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L693-L742 | train | 213,071 |
dshean/pygeotools | pygeotools/lib/geolib.py | raster_shpclip | def raster_shpclip(r_fn, shp_fn, extent='raster', bbox=False, pad=None, invert=False, verbose=False):
"""Clip an input raster by input polygon shapefile for given extent
"""
from pygeotools.lib import iolib
from pygeotools.lib import warplib
r_ds = iolib.fn_getds(r_fn)
r_srs = get_ds_srs(r_ds)
r_extent = ds_extent(r_ds)
r_extent_geom = bbox2geom(r_extent)
#NOTE: want to add spatial filter here to avoid reprojeting global RGI polygons, for example
shp_ds = ogr.Open(shp_fn)
lyr = shp_ds.GetLayer()
shp_srs = lyr.GetSpatialRef()
if not r_srs.IsSame(shp_srs):
shp_ds = lyr_proj(lyr, r_srs)
lyr = shp_ds.GetLayer()
#This returns xmin, ymin, xmax, ymax
shp_extent = lyr_extent(lyr)
shp_extent_geom = bbox2geom(shp_extent)
#Define the output - can set to either raster or shp
#Could accept as cl arg
out_srs = r_srs
if extent == 'raster':
out_extent = r_extent
elif extent == 'shp':
out_extent = shp_extent
elif extent == 'intersection':
out_extent = geom_intersection([r_extent_geom, shp_extent_geom])
elif extent == 'union':
out_extent = geom_union([r_extent_geom, shp_extent_geom])
else:
print("Unexpected extent specification, reverting to input raster extent")
out_extent = 'raster'
#Add padding around shp_extent
#Should implement buffer here
if pad is not None:
out_extent = pad_extent(out_extent, width=pad)
print("Raster to clip: %s\nShapefile used to clip: %s" % (r_fn, shp_fn))
if verbose:
print(shp_extent)
print(r_extent)
print(out_extent)
r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
r = iolib.ds_getma(r_ds)
#If bbox, return without clipping, otherwise, clip to polygons
if not bbox:
#Create binary mask from shp
mask = shp2array(shp_fn, r_ds)
if invert:
mask = ~(mask)
#Now apply the mask
r = np.ma.array(r, mask=mask)
#Return both the array and the dataset, needed for writing out
#Should probably just write r to r_ds and return r_ds
return r, r_ds | python | def raster_shpclip(r_fn, shp_fn, extent='raster', bbox=False, pad=None, invert=False, verbose=False):
"""Clip an input raster by input polygon shapefile for given extent
"""
from pygeotools.lib import iolib
from pygeotools.lib import warplib
r_ds = iolib.fn_getds(r_fn)
r_srs = get_ds_srs(r_ds)
r_extent = ds_extent(r_ds)
r_extent_geom = bbox2geom(r_extent)
#NOTE: want to add spatial filter here to avoid reprojeting global RGI polygons, for example
shp_ds = ogr.Open(shp_fn)
lyr = shp_ds.GetLayer()
shp_srs = lyr.GetSpatialRef()
if not r_srs.IsSame(shp_srs):
shp_ds = lyr_proj(lyr, r_srs)
lyr = shp_ds.GetLayer()
#This returns xmin, ymin, xmax, ymax
shp_extent = lyr_extent(lyr)
shp_extent_geom = bbox2geom(shp_extent)
#Define the output - can set to either raster or shp
#Could accept as cl arg
out_srs = r_srs
if extent == 'raster':
out_extent = r_extent
elif extent == 'shp':
out_extent = shp_extent
elif extent == 'intersection':
out_extent = geom_intersection([r_extent_geom, shp_extent_geom])
elif extent == 'union':
out_extent = geom_union([r_extent_geom, shp_extent_geom])
else:
print("Unexpected extent specification, reverting to input raster extent")
out_extent = 'raster'
#Add padding around shp_extent
#Should implement buffer here
if pad is not None:
out_extent = pad_extent(out_extent, width=pad)
print("Raster to clip: %s\nShapefile used to clip: %s" % (r_fn, shp_fn))
if verbose:
print(shp_extent)
print(r_extent)
print(out_extent)
r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
r = iolib.ds_getma(r_ds)
#If bbox, return without clipping, otherwise, clip to polygons
if not bbox:
#Create binary mask from shp
mask = shp2array(shp_fn, r_ds)
if invert:
mask = ~(mask)
#Now apply the mask
r = np.ma.array(r, mask=mask)
#Return both the array and the dataset, needed for writing out
#Should probably just write r to r_ds and return r_ds
return r, r_ds | [
"def",
"raster_shpclip",
"(",
"r_fn",
",",
"shp_fn",
",",
"extent",
"=",
"'raster'",
",",
"bbox",
"=",
"False",
",",
"pad",
"=",
"None",
",",
"invert",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"from",
"pygeotools",
".",
"lib",
"import",
... | Clip an input raster by input polygon shapefile for given extent | [
"Clip",
"an",
"input",
"raster",
"by",
"input",
"polygon",
"shapefile",
"for",
"given",
"extent"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L797-L861 | train | 213,072 |
dshean/pygeotools | pygeotools/lib/geolib.py | geom2shp | def geom2shp(geom, out_fn, fields=False):
"""Write out a new shapefile for input geometry
"""
from pygeotools.lib import timelib
driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName(driverName)
if os.path.exists(out_fn):
drv.DeleteDataSource(out_fn)
out_ds = drv.CreateDataSource(out_fn)
out_lyrname = os.path.splitext(os.path.split(out_fn)[1])[0]
geom_srs = geom.GetSpatialReference()
geom_type = geom.GetGeometryType()
out_lyr = out_ds.CreateLayer(out_lyrname, geom_srs, geom_type)
if fields:
field_defn = ogr.FieldDefn("name", ogr.OFTString)
field_defn.SetWidth(128)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("path", ogr.OFTString)
field_defn.SetWidth(254)
out_lyr.CreateField(field_defn)
#field_defn = ogr.FieldDefn("date", ogr.OFTString)
#This allows sorting by date
field_defn = ogr.FieldDefn("date", ogr.OFTInteger)
field_defn.SetWidth(32)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("decyear", ogr.OFTReal)
field_defn.SetPrecision(8)
field_defn.SetWidth(64)
out_lyr.CreateField(field_defn)
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
out_feat.SetGeometry(geom)
if fields:
#Hack to force output extesion to tif, since out_fn is shp
out_path = os.path.splitext(out_fn)[0] + '.tif'
out_feat.SetField("name", os.path.split(out_path)[-1])
out_feat.SetField("path", out_path)
#Try to extract a date from input raster fn
out_feat_date = timelib.fn_getdatetime(out_fn)
if out_feat_date is not None:
datestamp = int(out_feat_date.strftime('%Y%m%d'))
#out_feat_date = int(out_feat_date.strftime('%Y%m%d%H%M'))
out_feat.SetField("date", datestamp)
decyear = timelib.dt2decyear(out_feat_date)
out_feat.SetField("decyear", decyear)
out_lyr.CreateFeature(out_feat)
out_ds = None | python | def geom2shp(geom, out_fn, fields=False):
"""Write out a new shapefile for input geometry
"""
from pygeotools.lib import timelib
driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName(driverName)
if os.path.exists(out_fn):
drv.DeleteDataSource(out_fn)
out_ds = drv.CreateDataSource(out_fn)
out_lyrname = os.path.splitext(os.path.split(out_fn)[1])[0]
geom_srs = geom.GetSpatialReference()
geom_type = geom.GetGeometryType()
out_lyr = out_ds.CreateLayer(out_lyrname, geom_srs, geom_type)
if fields:
field_defn = ogr.FieldDefn("name", ogr.OFTString)
field_defn.SetWidth(128)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("path", ogr.OFTString)
field_defn.SetWidth(254)
out_lyr.CreateField(field_defn)
#field_defn = ogr.FieldDefn("date", ogr.OFTString)
#This allows sorting by date
field_defn = ogr.FieldDefn("date", ogr.OFTInteger)
field_defn.SetWidth(32)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("decyear", ogr.OFTReal)
field_defn.SetPrecision(8)
field_defn.SetWidth(64)
out_lyr.CreateField(field_defn)
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
out_feat.SetGeometry(geom)
if fields:
#Hack to force output extesion to tif, since out_fn is shp
out_path = os.path.splitext(out_fn)[0] + '.tif'
out_feat.SetField("name", os.path.split(out_path)[-1])
out_feat.SetField("path", out_path)
#Try to extract a date from input raster fn
out_feat_date = timelib.fn_getdatetime(out_fn)
if out_feat_date is not None:
datestamp = int(out_feat_date.strftime('%Y%m%d'))
#out_feat_date = int(out_feat_date.strftime('%Y%m%d%H%M'))
out_feat.SetField("date", datestamp)
decyear = timelib.dt2decyear(out_feat_date)
out_feat.SetField("decyear", decyear)
out_lyr.CreateFeature(out_feat)
out_ds = None | [
"def",
"geom2shp",
"(",
"geom",
",",
"out_fn",
",",
"fields",
"=",
"False",
")",
":",
"from",
"pygeotools",
".",
"lib",
"import",
"timelib",
"driverName",
"=",
"\"ESRI Shapefile\"",
"drv",
"=",
"ogr",
".",
"GetDriverByName",
"(",
"driverName",
")",
"if",
"... | Write out a new shapefile for input geometry | [
"Write",
"out",
"a",
"new",
"shapefile",
"for",
"input",
"geometry"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L888-L933 | train | 213,073 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_outline | def get_outline(ds, t_srs=None, scale=1.0, simplify=False, convex=False):
"""Generate outline of unmasked values in input raster
get_outline is an attempt to reproduce the PostGIS Raster ST_MinConvexHull function
Could potentially do the following: Extract random pts from unmasked elements, get indices, Run scipy convex hull, Convert hull indices to mapped coords
See this: http://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
This generates a wkt polygon outline of valid data for the input raster
Want to limit the dimensions of a, as notmasked_edges is slow: a = iolib.ds_getma_sub(ds, scale=scale)
"""
gt = np.array(ds.GetGeoTransform())
from pygeotools.lib import iolib
a = iolib.ds_getma_sub(ds, scale=scale)
#Create empty geometry
geom = ogr.Geometry(ogr.wkbPolygon)
#Check to make sure we have unmasked data
if a.count() != 0:
#Scale the gt for reduced resolution
#The UL coords should remain the same, as any rounding will trim LR
if (scale != 1.0):
gt[1] *= scale
gt[5] *= scale
#Get srs
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
#Find the unmasked edges
#Note: using only axis=0 from notmasked_edges will miss undercuts - see malib.get_edgemask
#Better ways to do this - binary mask, sum (see numpy2stl)
#edges0, edges1, edges = malib.get_edges(a)
px = np.ma.notmasked_edges(a, axis=0)
# coord = []
#Combine edge arrays, reversing order and adding first point to complete polygon
x = np.concatenate((px[0][1][::1], px[1][1][::-1], [px[0][1][0]]))
#x = np.concatenate((edges[0][1][::1], edges[1][1][::-1], [edges[0][1][0]]))
y = np.concatenate((px[0][0][::1], px[1][0][::-1], [px[0][0][0]]))
#y = np.concatenate((edges[0][0][::1], edges[1][0][::-1], [edges[0][0][0]]))
#Use np arrays for computing mapped coords
mx, my = pixelToMap(x, y, gt)
#Create wkt string
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
geom.Transform(ct)
#Make sure geometry has correct srs assigned
geom.AssignSpatialReference(t_srs)
if not geom.IsValid():
tol = gt[1] * 0.1
geom = geom.Simplify(tol)
#Need to get output units and extent for tolerance specification
if simplify:
#2 pixel tolerance
tol = gt[1] * 2
geom = geom.Simplify(tol)
if convex:
geom = geom.ConvexHull()
else:
print("No unmasked values found")
return geom | python | def get_outline(ds, t_srs=None, scale=1.0, simplify=False, convex=False):
"""Generate outline of unmasked values in input raster
get_outline is an attempt to reproduce the PostGIS Raster ST_MinConvexHull function
Could potentially do the following: Extract random pts from unmasked elements, get indices, Run scipy convex hull, Convert hull indices to mapped coords
See this: http://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
This generates a wkt polygon outline of valid data for the input raster
Want to limit the dimensions of a, as notmasked_edges is slow: a = iolib.ds_getma_sub(ds, scale=scale)
"""
gt = np.array(ds.GetGeoTransform())
from pygeotools.lib import iolib
a = iolib.ds_getma_sub(ds, scale=scale)
#Create empty geometry
geom = ogr.Geometry(ogr.wkbPolygon)
#Check to make sure we have unmasked data
if a.count() != 0:
#Scale the gt for reduced resolution
#The UL coords should remain the same, as any rounding will trim LR
if (scale != 1.0):
gt[1] *= scale
gt[5] *= scale
#Get srs
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
#Find the unmasked edges
#Note: using only axis=0 from notmasked_edges will miss undercuts - see malib.get_edgemask
#Better ways to do this - binary mask, sum (see numpy2stl)
#edges0, edges1, edges = malib.get_edges(a)
px = np.ma.notmasked_edges(a, axis=0)
# coord = []
#Combine edge arrays, reversing order and adding first point to complete polygon
x = np.concatenate((px[0][1][::1], px[1][1][::-1], [px[0][1][0]]))
#x = np.concatenate((edges[0][1][::1], edges[1][1][::-1], [edges[0][1][0]]))
y = np.concatenate((px[0][0][::1], px[1][0][::-1], [px[0][0][0]]))
#y = np.concatenate((edges[0][0][::1], edges[1][0][::-1], [edges[0][0][0]]))
#Use np arrays for computing mapped coords
mx, my = pixelToMap(x, y, gt)
#Create wkt string
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
geom.Transform(ct)
#Make sure geometry has correct srs assigned
geom.AssignSpatialReference(t_srs)
if not geom.IsValid():
tol = gt[1] * 0.1
geom = geom.Simplify(tol)
#Need to get output units and extent for tolerance specification
if simplify:
#2 pixel tolerance
tol = gt[1] * 2
geom = geom.Simplify(tol)
if convex:
geom = geom.ConvexHull()
else:
print("No unmasked values found")
return geom | [
"def",
"get_outline",
"(",
"ds",
",",
"t_srs",
"=",
"None",
",",
"scale",
"=",
"1.0",
",",
"simplify",
"=",
"False",
",",
"convex",
"=",
"False",
")",
":",
"gt",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"GetGeoTransform",
"(",
")",
")",
"from",
"... | Generate outline of unmasked values in input raster
get_outline is an attempt to reproduce the PostGIS Raster ST_MinConvexHull function
Could potentially do the following: Extract random pts from unmasked elements, get indices, Run scipy convex hull, Convert hull indices to mapped coords
See this: http://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
This generates a wkt polygon outline of valid data for the input raster
Want to limit the dimensions of a, as notmasked_edges is slow: a = iolib.ds_getma_sub(ds, scale=scale) | [
"Generate",
"outline",
"of",
"unmasked",
"values",
"in",
"input",
"raster"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L936-L999 | train | 213,074 |
dshean/pygeotools | pygeotools/lib/geolib.py | ds_cT | def ds_cT(ds, x, y, xy_srs=wgs_srs):
"""Convert input point coordinates to map coordinates that match input dataset
"""
#Convert lat/lon to projected srs
ds_srs = get_ds_srs(ds)
#If xy_srs is undefined, assume it is the same as ds_srs
mX = x
mY = y
if xy_srs is not None:
if not ds_srs.IsSame(xy_srs):
mX, mY, mZ = cT_helper(x, y, 0, xy_srs, ds_srs)
return mX, mY | python | def ds_cT(ds, x, y, xy_srs=wgs_srs):
"""Convert input point coordinates to map coordinates that match input dataset
"""
#Convert lat/lon to projected srs
ds_srs = get_ds_srs(ds)
#If xy_srs is undefined, assume it is the same as ds_srs
mX = x
mY = y
if xy_srs is not None:
if not ds_srs.IsSame(xy_srs):
mX, mY, mZ = cT_helper(x, y, 0, xy_srs, ds_srs)
return mX, mY | [
"def",
"ds_cT",
"(",
"ds",
",",
"x",
",",
"y",
",",
"xy_srs",
"=",
"wgs_srs",
")",
":",
"#Convert lat/lon to projected srs",
"ds_srs",
"=",
"get_ds_srs",
"(",
"ds",
")",
"#If xy_srs is undefined, assume it is the same as ds_srs",
"mX",
"=",
"x",
"mY",
"=",
"y",
... | Convert input point coordinates to map coordinates that match input dataset | [
"Convert",
"input",
"point",
"coordinates",
"to",
"map",
"coordinates",
"that",
"match",
"input",
"dataset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1005-L1016 | train | 213,075 |
dshean/pygeotools | pygeotools/lib/geolib.py | line2pts | def line2pts(geom, dl=None):
"""Given an input line geom, generate points at fixed interval
Useful for extracting profile data from raster
"""
#Extract list of (x,y) tuples at nodes
nodes = geom.GetPoints()
#print "%i nodes" % len(nodes)
#Point spacing in map units
if dl is None:
nsteps=1000
dl = geom.Length()/nsteps
#This only works for equidistant projection!
#l = np.arange(0, geom.Length(), dl)
#Initialize empty lists
l = []
mX = []
mY = []
#Add first point to output lists
l += [0]
x = nodes[0][0]
y = nodes[0][1]
mX += [x]
mY += [y]
#Remainder
rem_l = 0
#Previous length (initially 0)
last_l = l[-1]
#Loop through each line segment in the feature
for i in range(0,len(nodes)-1):
x1, y1 = nodes[i]
x2, y2 = nodes[i+1]
#Total length of segment
tl = np.sqrt((x2-x1)**2 + (y2-y1)**2)
#Number of dl steps we can fit in this segment
#This returns floor
steps = int((tl+rem_l)/dl)
if steps > 0:
dx = ((x2-x1)/tl)*dl
dy = ((y2-y1)/tl)*dl
rem_x = rem_l*(dx/dl)
rem_y = rem_l*(dy/dl)
#Loop through each step and append to lists
for n in range(1, steps+1):
l += [last_l + (dl*n)]
#Remove the existing remainder
x = x1 + (dx*n) - rem_x
y = y1 + (dy*n) - rem_y
mX += [x]
mY += [y]
#Note: could just build up arrays of pX, pY for entire line, then do single z extraction
#Update the remainder
rem_l += tl - (steps * dl)
last_l = l[-1]
else:
rem_l += tl
return l, mX, mY | python | def line2pts(geom, dl=None):
"""Given an input line geom, generate points at fixed interval
Useful for extracting profile data from raster
"""
#Extract list of (x,y) tuples at nodes
nodes = geom.GetPoints()
#print "%i nodes" % len(nodes)
#Point spacing in map units
if dl is None:
nsteps=1000
dl = geom.Length()/nsteps
#This only works for equidistant projection!
#l = np.arange(0, geom.Length(), dl)
#Initialize empty lists
l = []
mX = []
mY = []
#Add first point to output lists
l += [0]
x = nodes[0][0]
y = nodes[0][1]
mX += [x]
mY += [y]
#Remainder
rem_l = 0
#Previous length (initially 0)
last_l = l[-1]
#Loop through each line segment in the feature
for i in range(0,len(nodes)-1):
x1, y1 = nodes[i]
x2, y2 = nodes[i+1]
#Total length of segment
tl = np.sqrt((x2-x1)**2 + (y2-y1)**2)
#Number of dl steps we can fit in this segment
#This returns floor
steps = int((tl+rem_l)/dl)
if steps > 0:
dx = ((x2-x1)/tl)*dl
dy = ((y2-y1)/tl)*dl
rem_x = rem_l*(dx/dl)
rem_y = rem_l*(dy/dl)
#Loop through each step and append to lists
for n in range(1, steps+1):
l += [last_l + (dl*n)]
#Remove the existing remainder
x = x1 + (dx*n) - rem_x
y = y1 + (dy*n) - rem_y
mX += [x]
mY += [y]
#Note: could just build up arrays of pX, pY for entire line, then do single z extraction
#Update the remainder
rem_l += tl - (steps * dl)
last_l = l[-1]
else:
rem_l += tl
return l, mX, mY | [
"def",
"line2pts",
"(",
"geom",
",",
"dl",
"=",
"None",
")",
":",
"#Extract list of (x,y) tuples at nodes",
"nodes",
"=",
"geom",
".",
"GetPoints",
"(",
")",
"#print \"%i nodes\" % len(nodes)",
"#Point spacing in map units",
"if",
"dl",
"is",
"None",
":",
"nsteps",
... | Given an input line geom, generate points at fixed interval
Useful for extracting profile data from raster | [
"Given",
"an",
"input",
"line",
"geom",
"generate",
"points",
"at",
"fixed",
"interval",
"Useful",
"for",
"extracting",
"profile",
"data",
"from",
"raster"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1122-L1190 | train | 213,076 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_res_stats | def get_res_stats(ds_list, t_srs=None):
"""Return resolution stats for an input dataset list
"""
if t_srs is None:
t_srs = get_ds_srs(ds_list[0])
res = np.array([get_res(ds, t_srs=t_srs) for ds in ds_list])
#Check that all projections are identical
#gt_array = np.array([ds.GetGeoTransform() for ds in args])
#xres = gt_array[:,1]
#yres = -gt_array[:,5]
#if xres == yres:
#res = np.concatenate((xres, yres))
min = np.min(res)
max = np.max(res)
mean = np.mean(res)
med = np.median(res)
return (min, max, mean, med) | python | def get_res_stats(ds_list, t_srs=None):
"""Return resolution stats for an input dataset list
"""
if t_srs is None:
t_srs = get_ds_srs(ds_list[0])
res = np.array([get_res(ds, t_srs=t_srs) for ds in ds_list])
#Check that all projections are identical
#gt_array = np.array([ds.GetGeoTransform() for ds in args])
#xres = gt_array[:,1]
#yres = -gt_array[:,5]
#if xres == yres:
#res = np.concatenate((xres, yres))
min = np.min(res)
max = np.max(res)
mean = np.mean(res)
med = np.median(res)
return (min, max, mean, med) | [
"def",
"get_res_stats",
"(",
"ds_list",
",",
"t_srs",
"=",
"None",
")",
":",
"if",
"t_srs",
"is",
"None",
":",
"t_srs",
"=",
"get_ds_srs",
"(",
"ds_list",
"[",
"0",
"]",
")",
"res",
"=",
"np",
".",
"array",
"(",
"[",
"get_res",
"(",
"ds",
",",
"t... | Return resolution stats for an input dataset list | [
"Return",
"resolution",
"stats",
"for",
"an",
"input",
"dataset",
"list"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1204-L1220 | train | 213,077 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_res | def get_res(ds, t_srs=None, square=False):
"""Get GDAL Dataset raster resolution
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#This is Xres, Yres
res = [gt[1], np.abs(gt[5])]
if square:
res = [np.mean(res), np.mean(res)]
if t_srs is not None and not ds_srs.IsSame(t_srs):
if True:
#This diagonal approach is similar to the approach in gdaltransformer.cpp
#Bad news for large extents near the poles
#ullr = get_ullr(ds, t_srs)
#diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
extent = ds_extent(ds, t_srs)
diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2)
res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
res = [res, res]
else:
#Compute from center pixel
ct = osr.CoordinateTransformation(ds_srs, t_srs)
pt = get_center(ds)
#Transform center coordinates
pt_ct = ct.TransformPoint(*pt)
#Transform center + single pixel offset coordinates
pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
#Compute resolution in new units
res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
return res | python | def get_res(ds, t_srs=None, square=False):
"""Get GDAL Dataset raster resolution
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#This is Xres, Yres
res = [gt[1], np.abs(gt[5])]
if square:
res = [np.mean(res), np.mean(res)]
if t_srs is not None and not ds_srs.IsSame(t_srs):
if True:
#This diagonal approach is similar to the approach in gdaltransformer.cpp
#Bad news for large extents near the poles
#ullr = get_ullr(ds, t_srs)
#diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
extent = ds_extent(ds, t_srs)
diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2)
res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
res = [res, res]
else:
#Compute from center pixel
ct = osr.CoordinateTransformation(ds_srs, t_srs)
pt = get_center(ds)
#Transform center coordinates
pt_ct = ct.TransformPoint(*pt)
#Transform center + single pixel offset coordinates
pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
#Compute resolution in new units
res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
return res | [
"def",
"get_res",
"(",
"ds",
",",
"t_srs",
"=",
"None",
",",
"square",
"=",
"False",
")",
":",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"ds_srs",
"=",
"get_ds_srs",
"(",
"ds",
")",
"#This is Xres, Yres",
"res",
"=",
"[",
"gt",
"[",
"1",
"... | Get GDAL Dataset raster resolution | [
"Get",
"GDAL",
"Dataset",
"raster",
"resolution"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1222-L1251 | train | 213,078 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_center | def get_center(ds, t_srs=None):
"""Get center coordinates of GDAL Dataset
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#Note: this is center of center pixel, not ul corner of center pixel
center = [gt[0] + (gt[1] * ds.RasterXSize/2.0), gt[3] + (gt[5] * ds.RasterYSize/2.0)]
#include t_srs.Validate() and t_srs.Fixup()
if t_srs is not None and not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
center = list(ct.TransformPoint(*center)[0:2])
return center | python | def get_center(ds, t_srs=None):
"""Get center coordinates of GDAL Dataset
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#Note: this is center of center pixel, not ul corner of center pixel
center = [gt[0] + (gt[1] * ds.RasterXSize/2.0), gt[3] + (gt[5] * ds.RasterYSize/2.0)]
#include t_srs.Validate() and t_srs.Fixup()
if t_srs is not None and not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
center = list(ct.TransformPoint(*center)[0:2])
return center | [
"def",
"get_center",
"(",
"ds",
",",
"t_srs",
"=",
"None",
")",
":",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"ds_srs",
"=",
"get_ds_srs",
"(",
"ds",
")",
"#Note: this is center of center pixel, not ul corner of center pixel",
"center",
"=",
"[",
"gt",... | Get center coordinates of GDAL Dataset | [
"Get",
"center",
"coordinates",
"of",
"GDAL",
"Dataset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1253-L1264 | train | 213,079 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_ds_srs | def get_ds_srs(ds):
"""Get srs object for GDAL Datset
"""
ds_srs = osr.SpatialReference()
ds_srs.ImportFromWkt(ds.GetProjectionRef())
return ds_srs | python | def get_ds_srs(ds):
"""Get srs object for GDAL Datset
"""
ds_srs = osr.SpatialReference()
ds_srs.ImportFromWkt(ds.GetProjectionRef())
return ds_srs | [
"def",
"get_ds_srs",
"(",
"ds",
")",
":",
"ds_srs",
"=",
"osr",
".",
"SpatialReference",
"(",
")",
"ds_srs",
".",
"ImportFromWkt",
"(",
"ds",
".",
"GetProjectionRef",
"(",
")",
")",
"return",
"ds_srs"
] | Get srs object for GDAL Datset | [
"Get",
"srs",
"object",
"for",
"GDAL",
"Datset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1266-L1271 | train | 213,080 |
dshean/pygeotools | pygeotools/lib/geolib.py | srs_check | def srs_check(ds):
"""Check validitiy of Dataset srs
Return True if srs is properly defined
"""
# ds_srs = get_ds_srs(ds)
gt = np.array(ds.GetGeoTransform())
gt_check = ~np.all(gt == np.array((0.0, 1.0, 0.0, 0.0, 0.0, 1.0)))
proj_check = (ds.GetProjection() != '')
#proj_check = ds_srs.IsProjected()
out = False
if gt_check and proj_check:
out = True
return out | python | def srs_check(ds):
"""Check validitiy of Dataset srs
Return True if srs is properly defined
"""
# ds_srs = get_ds_srs(ds)
gt = np.array(ds.GetGeoTransform())
gt_check = ~np.all(gt == np.array((0.0, 1.0, 0.0, 0.0, 0.0, 1.0)))
proj_check = (ds.GetProjection() != '')
#proj_check = ds_srs.IsProjected()
out = False
if gt_check and proj_check:
out = True
return out | [
"def",
"srs_check",
"(",
"ds",
")",
":",
"# ds_srs = get_ds_srs(ds)",
"gt",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"GetGeoTransform",
"(",
")",
")",
"gt_check",
"=",
"~",
"np",
".",
"all",
"(",
"gt",
"==",
"np",
".",
"array",
"(",
"(",
"0.0",
","... | Check validitiy of Dataset srs
Return True if srs is properly defined | [
"Check",
"validitiy",
"of",
"Dataset",
"srs"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1273-L1286 | train | 213,081 |
dshean/pygeotools | pygeotools/lib/geolib.py | ds_IsEmpty | def ds_IsEmpty(ds):
"""Check to see if dataset is empty after warp
"""
out = False
b = ds.GetRasterBand(1)
#Looks like this throws:
#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.
#Should just catch this rater than bothering with logic below
try:
mm = b.ComputeRasterMinMax()
if (mm[0] == mm[1]):
ndv = b.GetNoDataValue()
if ndv is None:
out = True
else:
if (mm[0] == ndv):
out = True
except Exception:
out = True
#Check for std of nan
#import math
#stats = b.ComputeStatistics(1)
#for x in stats:
# if math.isnan(x):
# out = True
# break
return out | python | def ds_IsEmpty(ds):
"""Check to see if dataset is empty after warp
"""
out = False
b = ds.GetRasterBand(1)
#Looks like this throws:
#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.
#Should just catch this rater than bothering with logic below
try:
mm = b.ComputeRasterMinMax()
if (mm[0] == mm[1]):
ndv = b.GetNoDataValue()
if ndv is None:
out = True
else:
if (mm[0] == ndv):
out = True
except Exception:
out = True
#Check for std of nan
#import math
#stats = b.ComputeStatistics(1)
#for x in stats:
# if math.isnan(x):
# out = True
# break
return out | [
"def",
"ds_IsEmpty",
"(",
"ds",
")",
":",
"out",
"=",
"False",
"b",
"=",
"ds",
".",
"GetRasterBand",
"(",
"1",
")",
"#Looks like this throws:",
"#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.",
"#Should just catch this rater than bothering with logic be... | Check to see if dataset is empty after warp | [
"Check",
"to",
"see",
"if",
"dataset",
"is",
"empty",
"after",
"warp"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1288-L1314 | train | 213,082 |
dshean/pygeotools | pygeotools/lib/geolib.py | gt_corners | def gt_corners(gt, nx, ny):
"""Get corner coordinates based on input geotransform and raster dimensions
"""
ul = [gt[0], gt[3]]
ll = [gt[0], gt[3] + (gt[5] * ny)]
ur = [gt[0] + (gt[1] * nx), gt[3]]
lr = [gt[0] + (gt[1] * nx), gt[3] + (gt[5] * ny)]
return ul, ll, ur, lr | python | def gt_corners(gt, nx, ny):
"""Get corner coordinates based on input geotransform and raster dimensions
"""
ul = [gt[0], gt[3]]
ll = [gt[0], gt[3] + (gt[5] * ny)]
ur = [gt[0] + (gt[1] * nx), gt[3]]
lr = [gt[0] + (gt[1] * nx), gt[3] + (gt[5] * ny)]
return ul, ll, ur, lr | [
"def",
"gt_corners",
"(",
"gt",
",",
"nx",
",",
"ny",
")",
":",
"ul",
"=",
"[",
"gt",
"[",
"0",
"]",
",",
"gt",
"[",
"3",
"]",
"]",
"ll",
"=",
"[",
"gt",
"[",
"0",
"]",
",",
"gt",
"[",
"3",
"]",
"+",
"(",
"gt",
"[",
"5",
"]",
"*",
"... | Get corner coordinates based on input geotransform and raster dimensions | [
"Get",
"corner",
"coordinates",
"based",
"on",
"input",
"geotransform",
"and",
"raster",
"dimensions"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1316-L1323 | train | 213,083 |
dshean/pygeotools | pygeotools/lib/geolib.py | ds_geom | def ds_geom(ds, t_srs=None):
"""Return dataset bbox envelope as geom
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
ns = ds.RasterXSize
nl = ds.RasterYSize
x = np.array([0, ns, ns, 0, 0], dtype=float)
y = np.array([0, 0, nl, nl, 0], dtype=float)
#Note: pixelToMap adds 0.5 to input coords, need to account for this here
x -= 0.5
y -= 0.5
mx, my = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
geom.AssignSpatialReference(ds_srs)
if not ds_srs.IsSame(t_srs):
geom_transform(geom, t_srs)
return geom | python | def ds_geom(ds, t_srs=None):
"""Return dataset bbox envelope as geom
"""
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
ns = ds.RasterXSize
nl = ds.RasterYSize
x = np.array([0, ns, ns, 0, 0], dtype=float)
y = np.array([0, 0, nl, nl, 0], dtype=float)
#Note: pixelToMap adds 0.5 to input coords, need to account for this here
x -= 0.5
y -= 0.5
mx, my = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
geom.AssignSpatialReference(ds_srs)
if not ds_srs.IsSame(t_srs):
geom_transform(geom, t_srs)
return geom | [
"def",
"ds_geom",
"(",
"ds",
",",
"t_srs",
"=",
"None",
")",
":",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"ds_srs",
"=",
"get_ds_srs",
"(",
"ds",
")",
"if",
"t_srs",
"is",
"None",
":",
"t_srs",
"=",
"ds_srs",
"ns",
"=",
"ds",
".",
"Ras... | Return dataset bbox envelope as geom | [
"Return",
"dataset",
"bbox",
"envelope",
"as",
"geom"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1389-L1409 | train | 213,084 |
dshean/pygeotools | pygeotools/lib/geolib.py | geom_wh | def geom_wh(geom):
"""Compute width and height of geometry in projected units
"""
e = geom.GetEnvelope()
h = e[1] - e[0]
w = e[3] - e[2]
return w, h | python | def geom_wh(geom):
"""Compute width and height of geometry in projected units
"""
e = geom.GetEnvelope()
h = e[1] - e[0]
w = e[3] - e[2]
return w, h | [
"def",
"geom_wh",
"(",
"geom",
")",
":",
"e",
"=",
"geom",
".",
"GetEnvelope",
"(",
")",
"h",
"=",
"e",
"[",
"1",
"]",
"-",
"e",
"[",
"0",
"]",
"w",
"=",
"e",
"[",
"3",
"]",
"-",
"e",
"[",
"2",
"]",
"return",
"w",
",",
"h"
] | Compute width and height of geometry in projected units | [
"Compute",
"width",
"and",
"height",
"of",
"geometry",
"in",
"projected",
"units"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1497-L1503 | train | 213,085 |
dshean/pygeotools | pygeotools/lib/geolib.py | gdaldem_mem_ma | def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing='hillshade', returnma=False, computeEdges=False):
"""
Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
Should only need to specify res, can caluclate local gt, cartesian srs
"""
if ds is None:
ds = mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
else:
ds = mem_ds_copy(ds)
b = ds.GetRasterBand(1)
b.WriteArray(ma)
out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma)
return out | python | def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing='hillshade', returnma=False, computeEdges=False):
"""
Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
Should only need to specify res, can caluclate local gt, cartesian srs
"""
if ds is None:
ds = mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
else:
ds = mem_ds_copy(ds)
b = ds.GetRasterBand(1)
b.WriteArray(ma)
out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma)
return out | [
"def",
"gdaldem_mem_ma",
"(",
"ma",
",",
"ds",
"=",
"None",
",",
"res",
"=",
"None",
",",
"extent",
"=",
"None",
",",
"srs",
"=",
"None",
",",
"processing",
"=",
"'hillshade'",
",",
"returnma",
"=",
"False",
",",
"computeEdges",
"=",
"False",
")",
":... | Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
Should only need to specify res, can caluclate local gt, cartesian srs | [
"Wrapper",
"to",
"allow",
"gdaldem",
"calculations",
"for",
"arbitrary",
"NumPy",
"masked",
"array",
"input",
"Untested",
"work",
"in",
"progress",
"placeholder",
"Should",
"only",
"need",
"to",
"specify",
"res",
"can",
"caluclate",
"local",
"gt",
"cartesian",
"... | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1660-L1673 | train | 213,086 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_xy_ma | def get_xy_ma(bma, gt, stride=1, origmask=True, newmask=None):
"""Return arrays of x and y map coordinates for input array and geotransform
"""
pX = np.arange(0, bma.shape[1], stride)
pY = np.arange(0, bma.shape[0], stride)
psamp = np.meshgrid(pX, pY)
#if origmask:
# psamp = np.ma.array(psamp, mask=np.ma.getmaskarray(bma), fill_value=0)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
mask = None
if origmask:
mask = np.ma.getmaskarray(bma)[::stride]
if newmask is not None:
mask = newmask[::stride]
mX = np.ma.array(mX, mask=mask, fill_value=0)
mY = np.ma.array(mY, mask=mask, fill_value=0)
return mX, mY | python | def get_xy_ma(bma, gt, stride=1, origmask=True, newmask=None):
"""Return arrays of x and y map coordinates for input array and geotransform
"""
pX = np.arange(0, bma.shape[1], stride)
pY = np.arange(0, bma.shape[0], stride)
psamp = np.meshgrid(pX, pY)
#if origmask:
# psamp = np.ma.array(psamp, mask=np.ma.getmaskarray(bma), fill_value=0)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
mask = None
if origmask:
mask = np.ma.getmaskarray(bma)[::stride]
if newmask is not None:
mask = newmask[::stride]
mX = np.ma.array(mX, mask=mask, fill_value=0)
mY = np.ma.array(mY, mask=mask, fill_value=0)
return mX, mY | [
"def",
"get_xy_ma",
"(",
"bma",
",",
"gt",
",",
"stride",
"=",
"1",
",",
"origmask",
"=",
"True",
",",
"newmask",
"=",
"None",
")",
":",
"pX",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"bma",
".",
"shape",
"[",
"1",
"]",
",",
"stride",
")",
"p... | Return arrays of x and y map coordinates for input array and geotransform | [
"Return",
"arrays",
"of",
"x",
"and",
"y",
"map",
"coordinates",
"for",
"input",
"array",
"and",
"geotransform"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1922-L1938 | train | 213,087 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_xy_1D | def get_xy_1D(ds, stride=1, getval=False):
"""Return 1D arrays of x and y map coordinates for input GDAL Dataset
"""
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
mX, dummy = pixelToMap(pX, pY[0], gt)
dummy, mY = pixelToMap(pX[0], pY, gt)
return mX, mY | python | def get_xy_1D(ds, stride=1, getval=False):
"""Return 1D arrays of x and y map coordinates for input GDAL Dataset
"""
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
mX, dummy = pixelToMap(pX, pY[0], gt)
dummy, mY = pixelToMap(pX[0], pY, gt)
return mX, mY | [
"def",
"get_xy_1D",
"(",
"ds",
",",
"stride",
"=",
"1",
",",
"getval",
"=",
"False",
")",
":",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"#stride = stride_m/gt[1]",
"pX",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"ds",
".",
"RasterXSize",
","... | Return 1D arrays of x and y map coordinates for input GDAL Dataset | [
"Return",
"1D",
"arrays",
"of",
"x",
"and",
"y",
"map",
"coordinates",
"for",
"input",
"GDAL",
"Dataset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1940-L1949 | train | 213,088 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_xy_grids | def get_xy_grids(ds, stride=1, getval=False):
"""Return 2D arrays of x and y map coordinates for input GDAL Dataset
"""
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
psamp = np.meshgrid(pX, pY)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
return mX, mY | python | def get_xy_grids(ds, stride=1, getval=False):
"""Return 2D arrays of x and y map coordinates for input GDAL Dataset
"""
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
psamp = np.meshgrid(pX, pY)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
return mX, mY | [
"def",
"get_xy_grids",
"(",
"ds",
",",
"stride",
"=",
"1",
",",
"getval",
"=",
"False",
")",
":",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"#stride = stride_m/gt[1]",
"pX",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"ds",
".",
"RasterXSize",
... | Return 2D arrays of x and y map coordinates for input GDAL Dataset | [
"Return",
"2D",
"arrays",
"of",
"x",
"and",
"y",
"map",
"coordinates",
"for",
"input",
"GDAL",
"Dataset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1951-L1960 | train | 213,089 |
dshean/pygeotools | pygeotools/lib/geolib.py | fitPlaneSVD | def fitPlaneSVD(XYZ):
"""Fit a plane to input point data using SVD
"""
[rows,cols] = XYZ.shape
# Set up constraint equations of the form AB = 0,
# where B is a column vector of the plane coefficients
# in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
p = (np.ones((rows,1)))
AB = np.hstack([XYZ,p])
[u, d, v] = np.linalg.svd(AB,0)
# Solution is last column of v.
B = np.array(v[3,:])
coeff = -B[[0, 1, 3]]/B[2]
return coeff | python | def fitPlaneSVD(XYZ):
"""Fit a plane to input point data using SVD
"""
[rows,cols] = XYZ.shape
# Set up constraint equations of the form AB = 0,
# where B is a column vector of the plane coefficients
# in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
p = (np.ones((rows,1)))
AB = np.hstack([XYZ,p])
[u, d, v] = np.linalg.svd(AB,0)
# Solution is last column of v.
B = np.array(v[3,:])
coeff = -B[[0, 1, 3]]/B[2]
return coeff | [
"def",
"fitPlaneSVD",
"(",
"XYZ",
")",
":",
"[",
"rows",
",",
"cols",
"]",
"=",
"XYZ",
".",
"shape",
"# Set up constraint equations of the form AB = 0,",
"# where B is a column vector of the plane coefficients",
"# in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.",
"p",
"=",
... | Fit a plane to input point data using SVD | [
"Fit",
"a",
"plane",
"to",
"input",
"point",
"data",
"using",
"SVD"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1962-L1975 | train | 213,090 |
dshean/pygeotools | pygeotools/lib/geolib.py | fitPlaneLSQ | def fitPlaneLSQ(XYZ):
"""Fit a plane to input point data using LSQ
"""
[rows,cols] = XYZ.shape
G = np.ones((rows,3))
G[:,0] = XYZ[:,0] #X
G[:,1] = XYZ[:,1] #Y
Z = XYZ[:,2]
coeff,resid,rank,s = np.linalg.lstsq(G,Z,rcond=None)
return coeff | python | def fitPlaneLSQ(XYZ):
"""Fit a plane to input point data using LSQ
"""
[rows,cols] = XYZ.shape
G = np.ones((rows,3))
G[:,0] = XYZ[:,0] #X
G[:,1] = XYZ[:,1] #Y
Z = XYZ[:,2]
coeff,resid,rank,s = np.linalg.lstsq(G,Z,rcond=None)
return coeff | [
"def",
"fitPlaneLSQ",
"(",
"XYZ",
")",
":",
"[",
"rows",
",",
"cols",
"]",
"=",
"XYZ",
".",
"shape",
"G",
"=",
"np",
".",
"ones",
"(",
"(",
"rows",
",",
"3",
")",
")",
"G",
"[",
":",
",",
"0",
"]",
"=",
"XYZ",
"[",
":",
",",
"0",
"]",
"... | Fit a plane to input point data using LSQ | [
"Fit",
"a",
"plane",
"to",
"input",
"point",
"data",
"using",
"LSQ"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1977-L1986 | train | 213,091 |
dshean/pygeotools | pygeotools/lib/geolib.py | ds_fitplane | def ds_fitplane(ds):
"""Fit a plane to values in GDAL Dataset
"""
from pygeotools.lib import iolib
bma = iolib.ds_getma(ds)
gt = ds.GetGeoTransform()
return ma_fitplane(bma, gt) | python | def ds_fitplane(ds):
"""Fit a plane to values in GDAL Dataset
"""
from pygeotools.lib import iolib
bma = iolib.ds_getma(ds)
gt = ds.GetGeoTransform()
return ma_fitplane(bma, gt) | [
"def",
"ds_fitplane",
"(",
"ds",
")",
":",
"from",
"pygeotools",
".",
"lib",
"import",
"iolib",
"bma",
"=",
"iolib",
".",
"ds_getma",
"(",
"ds",
")",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"return",
"ma_fitplane",
"(",
"bma",
",",
"gt",
"... | Fit a plane to values in GDAL Dataset | [
"Fit",
"a",
"plane",
"to",
"values",
"in",
"GDAL",
"Dataset"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2059-L2065 | train | 213,092 |
dshean/pygeotools | pygeotools/lib/geolib.py | getUTMzone | def getUTMzone(geom):
"""Determine UTM Zone for input geometry
"""
#If geom has srs properly defined, can do this
#geom.TransformTo(wgs_srs)
#Get centroid lat/lon
lon, lat = geom.Centroid().GetPoint_2D()
#Make sure we're -180 to 180
lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
zonenum = int(np.floor((lon180 + 180)/6) + 1)
#Determine N/S hemisphere
if lat >= 0:
zonehem = 'N'
else:
zonehem = 'S'
#Deal with special cases
if (lat >= 56.0 and lat < 64.0 and lon180 >= 3.0 and lon180 < 12.0):
zonenum = 32
if (lat >= 72.0 and lat < 84.0):
if (lon180 >= 0.0 and lon180 < 9.0):
zonenum = 31
elif (lon180 >= 9.0 and lon180 < 21.0):
zonenum = 33
elif (lon180 >= 21.0 and lon180 < 33.0):
zonenum = 35
elif (lon180 >= 33.0 and lon180 < 42.0):
zonenum = 37
return str(zonenum)+zonehem | python | def getUTMzone(geom):
"""Determine UTM Zone for input geometry
"""
#If geom has srs properly defined, can do this
#geom.TransformTo(wgs_srs)
#Get centroid lat/lon
lon, lat = geom.Centroid().GetPoint_2D()
#Make sure we're -180 to 180
lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
zonenum = int(np.floor((lon180 + 180)/6) + 1)
#Determine N/S hemisphere
if lat >= 0:
zonehem = 'N'
else:
zonehem = 'S'
#Deal with special cases
if (lat >= 56.0 and lat < 64.0 and lon180 >= 3.0 and lon180 < 12.0):
zonenum = 32
if (lat >= 72.0 and lat < 84.0):
if (lon180 >= 0.0 and lon180 < 9.0):
zonenum = 31
elif (lon180 >= 9.0 and lon180 < 21.0):
zonenum = 33
elif (lon180 >= 21.0 and lon180 < 33.0):
zonenum = 35
elif (lon180 >= 33.0 and lon180 < 42.0):
zonenum = 37
return str(zonenum)+zonehem | [
"def",
"getUTMzone",
"(",
"geom",
")",
":",
"#If geom has srs properly defined, can do this",
"#geom.TransformTo(wgs_srs)",
"#Get centroid lat/lon",
"lon",
",",
"lat",
"=",
"geom",
".",
"Centroid",
"(",
")",
".",
"GetPoint_2D",
"(",
")",
"#Make sure we're -180 to 180",
... | Determine UTM Zone for input geometry | [
"Determine",
"UTM",
"Zone",
"for",
"input",
"geometry"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2068-L2095 | train | 213,093 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_proj | def get_proj(geom, proj_list=None):
"""Determine best projection for input geometry
"""
out_srs = None
if proj_list is None:
proj_list = gen_proj_list()
#Go through user-defined projeciton list
for projbox in proj_list:
if projbox.geom.Intersects(geom):
out_srs = projbox.srs
break
#If geom doesn't fall in any of the user projection bbox, use UTM
if out_srs is None:
out_srs = getUTMsrs(geom)
return out_srs | python | def get_proj(geom, proj_list=None):
"""Determine best projection for input geometry
"""
out_srs = None
if proj_list is None:
proj_list = gen_proj_list()
#Go through user-defined projeciton list
for projbox in proj_list:
if projbox.geom.Intersects(geom):
out_srs = projbox.srs
break
#If geom doesn't fall in any of the user projection bbox, use UTM
if out_srs is None:
out_srs = getUTMsrs(geom)
return out_srs | [
"def",
"get_proj",
"(",
"geom",
",",
"proj_list",
"=",
"None",
")",
":",
"out_srs",
"=",
"None",
"if",
"proj_list",
"is",
"None",
":",
"proj_list",
"=",
"gen_proj_list",
"(",
")",
"#Go through user-defined projeciton list",
"for",
"projbox",
"in",
"proj_list",
... | Determine best projection for input geometry | [
"Determine",
"best",
"projection",
"for",
"input",
"geometry"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2105-L2119 | train | 213,094 |
dshean/pygeotools | pygeotools/lib/geolib.py | gen_proj_list | def gen_proj_list():
"""Create list of projections with cascading preference
"""
#Eventually, just read this in from a text file
proj_list = []
#Alaska
#Note, this spans -180/180
proj_list.append(ProjBox([-180, -130, 51.35, 71.35], 3338))
#proj_list.append(ProjBox([-130, 172.4, 51.35, 71.35], 3338))
#Transantarctic Mountains
proj_list.append(ProjBox([150, 175, -80, -70], 3294))
#Greenland
proj_list.append(ProjBox([-180, 180, 58, 82], 3413))
#Antarctica
proj_list.append(ProjBox([-180, 180, -90, -58], 3031))
#Arctic
proj_list.append(ProjBox([-180, 180, 60, 90], 3413))
return proj_list | python | def gen_proj_list():
"""Create list of projections with cascading preference
"""
#Eventually, just read this in from a text file
proj_list = []
#Alaska
#Note, this spans -180/180
proj_list.append(ProjBox([-180, -130, 51.35, 71.35], 3338))
#proj_list.append(ProjBox([-130, 172.4, 51.35, 71.35], 3338))
#Transantarctic Mountains
proj_list.append(ProjBox([150, 175, -80, -70], 3294))
#Greenland
proj_list.append(ProjBox([-180, 180, 58, 82], 3413))
#Antarctica
proj_list.append(ProjBox([-180, 180, -90, -58], 3031))
#Arctic
proj_list.append(ProjBox([-180, 180, 60, 90], 3413))
return proj_list | [
"def",
"gen_proj_list",
"(",
")",
":",
"#Eventually, just read this in from a text file",
"proj_list",
"=",
"[",
"]",
"#Alaska",
"#Note, this spans -180/180",
"proj_list",
".",
"append",
"(",
"ProjBox",
"(",
"[",
"-",
"180",
",",
"-",
"130",
",",
"51.35",
",",
"... | Create list of projections with cascading preference | [
"Create",
"list",
"of",
"projections",
"with",
"cascading",
"preference"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2133-L2150 | train | 213,095 |
dshean/pygeotools | pygeotools/lib/geolib.py | xy2geom | def xy2geom(x, y, t_srs=None):
"""Convert x and y point coordinates to geom
"""
geom_wkt = 'POINT({0} {1})'.format(x, y)
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if t_srs is not None and not wgs_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(t_srs, wgs_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
return geom | python | def xy2geom(x, y, t_srs=None):
"""Convert x and y point coordinates to geom
"""
geom_wkt = 'POINT({0} {1})'.format(x, y)
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if t_srs is not None and not wgs_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(t_srs, wgs_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
return geom | [
"def",
"xy2geom",
"(",
"x",
",",
"y",
",",
"t_srs",
"=",
"None",
")",
":",
"geom_wkt",
"=",
"'POINT({0} {1})'",
".",
"format",
"(",
"x",
",",
"y",
")",
"geom",
"=",
"ogr",
".",
"CreateGeometryFromWkt",
"(",
"geom_wkt",
")",
"if",
"t_srs",
"is",
"not"... | Convert x and y point coordinates to geom | [
"Convert",
"x",
"and",
"y",
"point",
"coordinates",
"to",
"geom"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2187-L2196 | train | 213,096 |
dshean/pygeotools | pygeotools/lib/geolib.py | get_dem_mosaic_cmd | def get_dem_mosaic_cmd(fn_list, o, fn_list_txt=None, tr=None, t_srs=None, t_projwin=None, georef_tile_size=None, threads=None, tile=None, stat=None):
"""
Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes
"""
cmd = ['dem_mosaic',]
if o is None:
o = 'mos'
cmd.extend(['-o', o])
if threads is None:
from pygeotools.lib import iolib
threads = iolib.cpu_count()
cmd.extend(['--threads', threads])
if tr is not None:
cmd.extend(['--tr', tr])
if t_srs is not None:
#cmd.extend(['--t_srs', t_srs.ExportToProj4()])
cmd.extend(['--t_srs', '"%s"' % t_srs.ExportToProj4()])
#cmd.extend(['--t_srs', "%s" % t_srs.ExportToProj4()])
if t_projwin is not None:
cmd.append('--t_projwin')
cmd.extend(t_projwin)
cmd.append('--force-projwin')
if tile is not None:
#Not yet implemented
#cmd.extend(tile_list)
cmd.append('--tile-index')
cmd.append(tile)
if georef_tile_size is not None:
cmd.extend(['--georef-tile-size', georef_tile_size])
if stat is not None:
if stat == 'wmean':
stat = None
else:
cmd.append('--%s' % stat.replace('index',''))
if stat in ['lastindex', 'firstindex', 'medianindex']:
#This will write out the index map to -last.tif by default
cmd.append('--save-index-map')
#Make sure we don't have ndv that conflicts with 0-based DEM indices
cmd.extend(['--output-nodata-value','-9999'])
#else:
# cmd.extend(['--save-dem-weight', o+'_weight'])
#If user provided a file containing list of DEMs to mosaic (useful to avoid long bash command issues)
if fn_list_txt is not None:
if os.path.exists(fn_list_txt):
cmd.append('-l')
cmd.append(fn_list_txt)
else:
print("Could not find input text file containing list of inputs")
else:
cmd.extend(fn_list)
cmd = [str(i) for i in cmd]
#print(cmd)
#return subprocess.call(cmd)
return cmd | python | def get_dem_mosaic_cmd(fn_list, o, fn_list_txt=None, tr=None, t_srs=None, t_projwin=None, georef_tile_size=None, threads=None, tile=None, stat=None):
"""
Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes
"""
cmd = ['dem_mosaic',]
if o is None:
o = 'mos'
cmd.extend(['-o', o])
if threads is None:
from pygeotools.lib import iolib
threads = iolib.cpu_count()
cmd.extend(['--threads', threads])
if tr is not None:
cmd.extend(['--tr', tr])
if t_srs is not None:
#cmd.extend(['--t_srs', t_srs.ExportToProj4()])
cmd.extend(['--t_srs', '"%s"' % t_srs.ExportToProj4()])
#cmd.extend(['--t_srs', "%s" % t_srs.ExportToProj4()])
if t_projwin is not None:
cmd.append('--t_projwin')
cmd.extend(t_projwin)
cmd.append('--force-projwin')
if tile is not None:
#Not yet implemented
#cmd.extend(tile_list)
cmd.append('--tile-index')
cmd.append(tile)
if georef_tile_size is not None:
cmd.extend(['--georef-tile-size', georef_tile_size])
if stat is not None:
if stat == 'wmean':
stat = None
else:
cmd.append('--%s' % stat.replace('index',''))
if stat in ['lastindex', 'firstindex', 'medianindex']:
#This will write out the index map to -last.tif by default
cmd.append('--save-index-map')
#Make sure we don't have ndv that conflicts with 0-based DEM indices
cmd.extend(['--output-nodata-value','-9999'])
#else:
# cmd.extend(['--save-dem-weight', o+'_weight'])
#If user provided a file containing list of DEMs to mosaic (useful to avoid long bash command issues)
if fn_list_txt is not None:
if os.path.exists(fn_list_txt):
cmd.append('-l')
cmd.append(fn_list_txt)
else:
print("Could not find input text file containing list of inputs")
else:
cmd.extend(fn_list)
cmd = [str(i) for i in cmd]
#print(cmd)
#return subprocess.call(cmd)
return cmd | [
"def",
"get_dem_mosaic_cmd",
"(",
"fn_list",
",",
"o",
",",
"fn_list_txt",
"=",
"None",
",",
"tr",
"=",
"None",
",",
"t_srs",
"=",
"None",
",",
"t_projwin",
"=",
"None",
",",
"georef_tile_size",
"=",
"None",
",",
"threads",
"=",
"None",
",",
"tile",
"=... | Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes | [
"Create",
"ASP",
"dem_mosaic",
"command",
"Useful",
"for",
"spawning",
"many",
"single",
"-",
"threaded",
"mosaicing",
"processes"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L2198-L2252 | train | 213,097 |
dshean/pygeotools | pygeotools/lib/warplib.py | parse_rs_alg | def parse_rs_alg(r):
"""Parse resampling algorithm
"""
#Note:GRA_CubicSpline created huge block artifacts for the St. Helen's compute_dh WV cases
#Stick with CubicSpline for both upsampling/downsampling for now
if r == 'near':
#Note: Nearest respects nodata when downsampling
gra = gdal.GRA_NearestNeighbour
elif r == 'bilinear':
gra = gdal.GRA_Bilinear
elif r == 'cubic':
gra = gdal.GRA_Cubic
elif r == 'cubicspline':
gra = gdal.GRA_CubicSpline
elif r == 'average':
gra = gdal.GRA_Average
elif r == 'lanczos':
gra = gdal.GRA_Lanczos
elif r == 'mode':
#Note: Mode respects nodata when downsampling, but very slow
gra = gdal.GRA_Mode
else:
gra = None
sys.exit("Invalid resampling method")
return gra | python | def parse_rs_alg(r):
"""Parse resampling algorithm
"""
#Note:GRA_CubicSpline created huge block artifacts for the St. Helen's compute_dh WV cases
#Stick with CubicSpline for both upsampling/downsampling for now
if r == 'near':
#Note: Nearest respects nodata when downsampling
gra = gdal.GRA_NearestNeighbour
elif r == 'bilinear':
gra = gdal.GRA_Bilinear
elif r == 'cubic':
gra = gdal.GRA_Cubic
elif r == 'cubicspline':
gra = gdal.GRA_CubicSpline
elif r == 'average':
gra = gdal.GRA_Average
elif r == 'lanczos':
gra = gdal.GRA_Lanczos
elif r == 'mode':
#Note: Mode respects nodata when downsampling, but very slow
gra = gdal.GRA_Mode
else:
gra = None
sys.exit("Invalid resampling method")
return gra | [
"def",
"parse_rs_alg",
"(",
"r",
")",
":",
"#Note:GRA_CubicSpline created huge block artifacts for the St. Helen's compute_dh WV cases",
"#Stick with CubicSpline for both upsampling/downsampling for now",
"if",
"r",
"==",
"'near'",
":",
"#Note: Nearest respects nodata when downsampling",
... | Parse resampling algorithm | [
"Parse",
"resampling",
"algorithm"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/warplib.py#L234-L258 | train | 213,098 |
dshean/pygeotools | pygeotools/lib/warplib.py | parse_srs | def parse_srs(t_srs, src_ds_list=None):
"""Parse arbitrary input t_srs
Parameters
----------
t_srs : str or gdal.Dataset or filename
Arbitrary input t_srs
src_ds_list : list of gdal.Dataset objects, optional
Needed if specifying 'first' or 'last'
Returns
-------
t_srs : osr.SpatialReference() object
Output spatial reference system
"""
if t_srs is None and src_ds_list is None:
print("Input t_srs and src_ds_list are both None")
else:
if t_srs is None:
t_srs = 'first'
if t_srs == 'first' and src_ds_list is not None:
t_srs = geolib.get_ds_srs(src_ds_list[0])
elif t_srs == 'last' and src_ds_list is not None:
t_srs = geolib.get_ds_srs(src_ds_list[-1])
#elif t_srs == 'source':
# t_srs = None
elif isinstance(t_srs, osr.SpatialReference):
pass
elif isinstance(t_srs, gdal.Dataset):
t_srs = geolib.get_ds_srs(t_srs)
elif isinstance(t_srs, str) and os.path.exists(t_srs):
t_srs = geolib.get_ds_srs(gdal.Open(t_srs))
elif isinstance(t_srs, str):
temp = osr.SpatialReference()
if 'EPSG' in t_srs.upper():
epsgcode = int(t_srs.split(':')[-1])
temp.ImportFromEPSG(epsgcode)
elif 'proj' in t_srs:
temp.ImportFromProj4(t_srs)
else:
#Assume the user knows what they are doing
temp.ImportFromWkt(t_srs)
t_srs = temp
else:
t_srs = None
return t_srs | python | def parse_srs(t_srs, src_ds_list=None):
"""Parse arbitrary input t_srs
Parameters
----------
t_srs : str or gdal.Dataset or filename
Arbitrary input t_srs
src_ds_list : list of gdal.Dataset objects, optional
Needed if specifying 'first' or 'last'
Returns
-------
t_srs : osr.SpatialReference() object
Output spatial reference system
"""
if t_srs is None and src_ds_list is None:
print("Input t_srs and src_ds_list are both None")
else:
if t_srs is None:
t_srs = 'first'
if t_srs == 'first' and src_ds_list is not None:
t_srs = geolib.get_ds_srs(src_ds_list[0])
elif t_srs == 'last' and src_ds_list is not None:
t_srs = geolib.get_ds_srs(src_ds_list[-1])
#elif t_srs == 'source':
# t_srs = None
elif isinstance(t_srs, osr.SpatialReference):
pass
elif isinstance(t_srs, gdal.Dataset):
t_srs = geolib.get_ds_srs(t_srs)
elif isinstance(t_srs, str) and os.path.exists(t_srs):
t_srs = geolib.get_ds_srs(gdal.Open(t_srs))
elif isinstance(t_srs, str):
temp = osr.SpatialReference()
if 'EPSG' in t_srs.upper():
epsgcode = int(t_srs.split(':')[-1])
temp.ImportFromEPSG(epsgcode)
elif 'proj' in t_srs:
temp.ImportFromProj4(t_srs)
else:
#Assume the user knows what they are doing
temp.ImportFromWkt(t_srs)
t_srs = temp
else:
t_srs = None
return t_srs | [
"def",
"parse_srs",
"(",
"t_srs",
",",
"src_ds_list",
"=",
"None",
")",
":",
"if",
"t_srs",
"is",
"None",
"and",
"src_ds_list",
"is",
"None",
":",
"print",
"(",
"\"Input t_srs and src_ds_list are both None\"",
")",
"else",
":",
"if",
"t_srs",
"is",
"None",
"... | Parse arbitrary input t_srs
Parameters
----------
t_srs : str or gdal.Dataset or filename
Arbitrary input t_srs
src_ds_list : list of gdal.Dataset objects, optional
Needed if specifying 'first' or 'last'
Returns
-------
t_srs : osr.SpatialReference() object
Output spatial reference system | [
"Parse",
"arbitrary",
"input",
"t_srs"
] | 5ac745717c0098d01eb293ff1fe32fd7358c76ab | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/warplib.py#L260-L305 | train | 213,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.