repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
davidhuser/dhis2.py | dhis2/utils.py | pretty_json | python | def pretty_json(obj):
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter())) | Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L142-L153 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
"""
Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload
"""
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]}
def search_auth_file(filename='dish.json'):
"""
Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename
"""
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
"""
Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed
"""
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return
def generate_uid():
"""
Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string
"""
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def clean_obj(obj, remove):
"""
Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable
"""
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj
|
davidhuser/dhis2.py | dhis2/utils.py | clean_obj | python | def clean_obj(obj, remove):
if isinstance(remove, string_types):
remove = [remove]
try:
iter(remove)
except TypeError:
raise ClientException("`remove` could not be removed from object: {}".format(repr(remove)))
else:
if isinstance(obj, dict):
obj = {
key: clean_obj(value, remove)
for key, value in iteritems(obj)
if key not in remove
}
elif isinstance(obj, list):
obj = [
clean_obj(item, remove)
for item in obj
if item not in remove
]
return obj | Recursively remove keys from list/dict/dict-of-lists/list-of-keys/nested ...,
e.g. remove all sharing keys or remove all 'user' fields
This should result in the same as if running in bash: `jq del(.. | .publicAccess?, .userGroupAccesses?)`
:param obj: the dict to remove keys from
:param remove: keys to remove - can be a string or iterable | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/utils.py#L156-L183 | null | # -*- coding: utf-8 -*-
"""
dhis2.utils
~~~~~~~~~~~
This module provides utility functions that are used within dhis2.py
"""
import json
import os
import re
import random
import string
from six import string_types, iteritems
from unicodecsv import DictReader
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
from .exceptions import ClientException
def load_csv(path, delimiter=','):
"""
Load CSV file from path and yield CSV rows
Usage:
for row in load_csv('/path/to/file'):
print(row)
or
list(load_csv('/path/to/file'))
:param path: file path
:param delimiter: CSV delimiter
:return: a generator where __next__ is a row of the CSV
"""
try:
with open(path, 'rb') as csvfile:
reader = DictReader(csvfile, delimiter=delimiter)
for row in reader:
yield row
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def load_json(path):
"""
Load JSON file from path
:param path: file path
:return: A Python object (e.g. a dict)
"""
try:
with open(path, 'r') as json_file:
return json.load(json_file)
except (OSError, IOError):
raise ClientException("File not found: {}".format(path))
def partition_payload(data, key, thresh):
"""
Yield partitions of a payload
e.g. with a threshold of 2:
{ "dataElements": [1, 2, 3] }
-->
{ "dataElements": [1, 2] }
and
{ "dataElements": [3] }
:param data: the payload
:param key: the key of the dict to partition
:param thresh: the maximum value of a chunk
:return: a generator where __next__ is a partition of the payload
"""
data = data[key]
for i in range(0, len(data), thresh):
yield {key: data[i:i + thresh]}
def search_auth_file(filename='dish.json'):
"""
Search filename in
- A) DHIS_HOME (env variable)
- B) current user's home folder
:param filename: the filename to search for
:return: full path of filename
"""
if 'DHIS_HOME' in os.environ:
return os.path.join(os.environ['DHIS_HOME'], filename)
else:
home_path = os.path.expanduser(os.path.join('~'))
for root, dirs, files in os.walk(home_path):
if filename in files:
return os.path.join(root, filename)
raise ClientException("'{}' not found - searched in $DHIS_HOME and in home folder".format(filename))
def version_to_int(value):
"""
Convert version info to integer
:param value: the version received from system/info, e.g. "2.28"
:return: integer from version, e.g. 28, None if it couldn't be parsed
"""
# remove '-SNAPSHOT'
value = value.replace('-SNAPSHOT', '')
# remove '-RCx'
if '-RC' in value:
value = value.split('-RC', 1)[0]
try:
return int(value.split('.')[1])
except (ValueError, IndexError):
return
def generate_uid():
"""
Create DHIS2 UID matching to Regex
^[A-Za-z][A-Za-z0-9]{10}$
:return: UID string
"""
# first must be a letter
first = random.choice(string.ascii_letters)
# rest must be letters or numbers
rest = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
return first + rest
def is_valid_uid(uid):
"""
:return: True if it is a valid DHIS2 UID, False if not
"""
pattern = r'^[A-Za-z][A-Za-z0-9]{10}$'
if not isinstance(uid, string_types):
return False
return bool(re.compile(pattern).match(uid))
def pretty_json(obj):
"""
Print JSON with indentation and colours
:param obj: the object to print - can be a dict or a string
"""
if isinstance(obj, string_types):
try:
obj = json.loads(obj)
except ValueError:
raise ClientException("`obj` is not a json string")
json_str = json.dumps(obj, sort_keys=True, indent=2)
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | euclid | python | def euclid(a, b):
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a | returns the Greatest Common Divisor of a and b | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L13-L21 | null | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | coPrime | python | def coPrime(l):
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True | returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L24-L30 | [
"def euclid(a, b):\n \"\"\"returns the Greatest Common Divisor of a and b\"\"\"\n a = abs(a)\n b = abs(b)\n if a < b:\n a, b = b, a\n while b != 0:\n a, b = b, a % b\n return a\n"
] | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | extendedEuclid | python | def extendedEuclid(a, b):
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y | return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L33-L40 | [
"def extendedEuclid(a, b):\n \"\"\"return a tuple of three values: x, y and z, such that x is\n the GCD of a and b, and x = y * a + z * b\"\"\"\n if a == 0:\n return b, 0, 1\n else:\n g, y, x = extendedEuclid(b % a, a)\n return g, x - (b // a) * y, y\n"
] | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | modInv | python | def modInv(a, m):
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0 | returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1 | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L43-L51 | [
"def coPrime(l):\n \"\"\"returns 'True' if the values in the list L are all co-prime\n otherwise, it returns 'False'. \"\"\"\n for i, j in combinations(l, 2):\n if euclid(i, j) != 1:\n return False\n return True\n",
"def extendedEuclid(a, b):\n \"\"\"return a tuple of three val... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | extractTwos | python | def extractTwos(m):
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i | m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L54-L64 | null | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | int2baseTwo | python | def int2baseTwo(x):
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse | x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L67-L76 | null | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | modExp | python | def modExp(a, d, n):
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n | returns a ** d (mod n) | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L79-L95 | [
"def int2baseTwo(x):\n \"\"\"x is a positive integer. Convert it to base two as a list of integers\n in reverse order as a list.\"\"\"\n # repeating x >>= 1 and x & 1 will do the trick\n assert x >= 0\n bitInverse = []\n while x != 0:\n bitInverse.append(x & 1)\n x >>= 1\n return ... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | millerRabin | python | def millerRabin(n, k):
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True | Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1 | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L98-L144 | [
"def extractTwos(m):\n \"\"\"m is a positive integer. A tuple (s, d) of integers is returned\n such that m = (2 ** s) * d.\"\"\"\n # the problem can be break down to count how many '0's are there in\n # the end of bin(m). This can be done this way: m & a stretch of '1's\n # which can be represent as ... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
# actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | primeSieve | python | def primeSieve(k):
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result | return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L147-L164 | [
"def isPrime(n):\n \"\"\"return True is given number n is absolutely prime,\n return False is otherwise.\"\"\"\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True\n"
] | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | findAPrime | python | def findAPrime(a, b, k):
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError | Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L167-L177 | [
"def millerRabin(n, k):\n \"\"\"\n Miller Rabin pseudo-prime test\n return True means likely a prime, (how sure about that, depending on k)\n return False means definitely a composite.\n Raise assertion error when n, k are not positive integers\n and n is not 1\n \"\"\"\n assert n >= 1\n ... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | newKey | python | def newKey(a, b, k):
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d) | Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L180-L202 | [
"def findAPrime(a, b, k):\n \"\"\"Return a pseudo prime number roughly between a and b,\n (could be larger than b). Raise ValueError if cannot find a\n pseudo prime after 10 * ln(x) + 3 tries. \"\"\"\n x = random.randint(a, b)\n for i in range(0, int(10 * math.log(x) + 3)):\n if millerRabin(x,... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | numList2blocks | python | def numList2blocks(l, n):
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList | Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L216-L236 | null | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
    """Convert a non-negative integer x to its base-two digits, least
    significant bit first, as a list of 0/1 ints. Returns [] for 0."""
    assert x >= 0
    if x == 0:
        return []
    # bin(x) is '0b...' most-significant-first; strip the prefix and reverse.
    return [int(digit) for digit in bin(x)[2:][::-1]]
def modExp(a, d, n):
    """Return (a ** d) mod n for d >= 0, n >= 1.

    :param a: base (any integer)
    :param d: non-negative exponent
    :param n: positive modulus
    :return: a ** d mod n, in [0, n - 1]

    Improvement: the original hand-rolled square-and-multiply never reduced
    ``result`` modulo n inside the loop, so intermediate products grew without
    bound for large exponents. The built-in three-argument pow() performs
    the same modular exponentiation with reduction at every step, and drops
    the dependency on int2baseTwo.
    """
    assert d >= 0
    assert n >= 0
    return pow(a, d, n)
def millerRabin(n, k):
    """
    Miller-Rabin pseudo-prime test.

    :param n: integer >= 1 to test
    :param k: number of random witness rounds (higher k = more confidence)
    :return: True if n is probably prime, False if definitely composite
    :raises AssertionError: if n < 1 or k <= 0

    Bug fixes relative to the original:
    - n == 1 previously fell through to extractTwos(n - 1) == extractTwos(0),
      which looped forever; 1 is not prime, so return False.
    - n == 3 previously reached random.randint(2, n - 2) == randint(2, 1),
      which raises ValueError (empty range); 3 is prime, so return True.
    """
    assert n >= 1
    assert k > 0
    if n == 1:
        return False  # 1 is not prime; also avoids the extractTwos(0) hang
    if n == 2 or n == 3:
        return True   # small primes; n == 3 would break randint(2, n - 2)
    if n % 2 == 0:
        return False  # every even n > 2 is composite
    # Write n - 1 as (2 ** s) * d with d odd.
    s, d = extractTwos(n - 1)
    assert 2 ** s * d == n - 1

    def tryComposite(a):
        """Return False if witness a proves n composite, None if inconclusive."""
        x = modExp(a, d, n)
        if x == 1 or x == n - 1:
            return None
        for _ in range(1, s):
            x = modExp(x, 2, n)
            if x == 1:
                return False  # non-trivial square root of 1 -> composite
            if x == n - 1:
                return None
        return False

    for _ in range(k):
        a = random.randint(2, n - 2)
        if tryComposite(a) is False:
            return False
    return True  # probably prime (error probability <= 4 ** -k)
def primeSieve(k):
    """Return a list of length k + 1 where list[i] == 1 if i is prime,
    0 if i is composite, and -1 if undefined (i < 2).

    Improvement: the original, despite its name, ran trial division up to
    sqrt(i) for every i (O(n * sqrt(n))). This is an actual sieve of
    Eratosthenes, O(n log log n), producing an identical result list.
    """
    result = [-1] * (k + 1)
    if k >= 2:
        # Mark everything >= 2 prime, then knock out multiples.
        for i in range(2, k + 1):
            result[i] = 1
        for i in range(2, int(k ** 0.5) + 1):
            if result[i] == 1:
                for multiple in range(i * i, k + 1, i):
                    result[multiple] = 0
    return result
def findAPrime(a, b, k):
    """Return a probable prime roughly between a and b (it may exceed b).

    Starts from a random point in [a, b] and scans upward, checking each
    candidate with millerRabin(candidate, k). Raises ValueError after
    about 10 * ln(start) + 3 failed candidates.
    """
    candidate = random.randint(a, b)
    attempts = int(10 * math.log(candidate) + 3)
    for _ in range(attempts):
        if millerRabin(candidate, k):
            return candidate
        candidate += 1
    raise ValueError
def newKey(a, b, k):
    """Generate an RSA key triple (n, e, d) from two distinct probable primes
    roughly in [a, b].

    :param a: lower bound for the prime search
    :param b: upper bound for the prime search
    :param k: Miller-Rabin confidence parameter passed to findAPrime
    :return: tuple (n, e, d) = (modulus, public exponent, private exponent)
    :raises ValueError: if findAPrime fails to locate a prime

    Fixes: the original wrapped prime generation in a bare ``except:`` that
    re-raised *every* failure as ValueError, hiding real bugs; findAPrime
    already raises ValueError itself, so let exceptions propagate as-is.
    It also drew e from randint(1, m): e == 1 passes the co-prime check but
    makes the ciphertext equal to the plaintext, so draw e >= 3 instead.
    """
    p = findAPrime(a, b, k)
    while True:
        q = findAPrime(a, b, k)
        if q != p:
            break

    n = p * q
    m = (p - 1) * (q - 1)  # Euler's totient phi(n)

    while True:
        # e must satisfy 1 < e < m and gcd(e, m) == 1.
        e = random.randint(3, m - 1)
        if coPrime([e, m]):
            break

    d = modInv(e, m)
    return (n, e, d)
def string2numList(strn):
    """Convert a string to a list of its characters' ordinal (ASCII) values.

    :param strn: input string
    :return: list of ints, one per character

    Cleanup: dropped the unnecessary ``list(strn)`` wrapper -- a string is
    already iterable character by character.
    # original author note: "origin pickle has bug"
    """
    return [ord(char) for char in strn]
def numList2string(l):
    """Join chr(i) for every int in *l* and unpickle the resulting data.

    NOTE(review): the original summary ("converts a list of integers to a
    string") is misleading -- the joined characters are fed to
    pickle.loads(). Running pickle.loads on bytes recovered from a
    ciphertext can execute arbitrary code if the data is untrusted; confirm
    every caller controls the input.
    NOTE(review): ''.join(map(chr, l)) yields text, but Python 3's
    pickle.loads requires bytes -- this line looks Python-2-only; verify.
    """
    return pickle.loads(''.join(map(chr, l)))
def blocks2numList(blocks, n):
    """Inverse of numList2blocks: split each base-256 block back into n bytes.

    :param blocks: iterable of non-negative block integers
    :param n: bytes packed per block
    :return: flat list of byte values (most significant byte of each block first)

    Cleanup: removed the pointless ``copy.copy(blocks)`` -- the loop only
    rebinds its local variable, it never mutates the caller's list -- and
    used divmod() for the simultaneous quotient/remainder.
    """
    digits = []
    for block in blocks:
        chunk = []
        for _ in range(n):
            block, low_byte = divmod(block, 256)
            chunk.append(low_byte)
        chunk.reverse()  # bytes were peeled off least-significant first
        digits.extend(chunk)
    return digits
def encrypt(message, modN, e, blockSize):
    """given a string message, public keys and blockSize, encrypt using
    RSA algorithms.

    Packs the message's character codes into base-256 blocks of blockSize
    bytes and returns a single integer: (first block) ** e mod modN.

    NOTE(review): the multi-block version is commented out below and only
    numBlocks[0] is encrypted, so any message longer than one block is
    silently truncated -- confirm callers never exceed blockSize
    (rsa_encrypt_password passes blockSize=128).
    """
    numList = string2numList(message)
    numBlocks = numList2blocks(numList, blockSize) # only one block
    message = numBlocks[0]
    # return [modExp(blocks, e, modN) for blocks in numBlocks]
    return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
    """reverse function of encrypt

    Applies modExp(block, d, modN) to every cipher block in *secret*,
    unpacks the resulting integers back into byte values, and decodes
    them through numList2string.

    NOTE(review): *secret* must be an iterable of block integers, yet
    encrypt() returns a single int -- callers apparently need to wrap it
    in a list; verify against the call sites.
    NOTE(review): numList2string runs pickle.loads on the decrypted
    bytes -- only safe for trusted data.
    """
    numBlocks = [modExp(blocks, d, modN) for blocks in secret]
    numList = blocks2numList(numBlocks, blockSize)
    return numList2string(numList)
def block_size(val):
    """Validate a block-size value (argparse-style type callable).

    :param val: value convertible to int
    :return: the original value unchanged when 10 <= int(val) <= 1000
    :raises ValueError: when the value is out of range (or not an int at all)

    Fix: the range check used ``assert``, which is stripped under ``python -O``
    and raises the wrong exception type for a validator; argparse expects
    type callables to signal bad input with ValueError.
    """
    v = int(val)
    if not 10 <= v <= 1000:
        raise ValueError('block size must be between 10 and 1000, got %r' % val)
    return val
# Use xunlei's base64 js code directly
# hex2b64(h) converts a hexadecimal string to base64 (with '=' padding),
# consuming 3 hex digits (12 bits -> two base64 chars) at a time.
# Evaluated through js2py so the result matches the JavaScript shipped by
# the Xunlei web client byte for byte.
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def _b64_to_int(encoded):
    """Decode a base64 string and interpret its bytes as a big-endian integer.

    Replaces ``long(base64.decodestring(x).encode('hex'), 16)``, which relied
    on the Python-2-only ``long`` type, the deprecated ``decodestring``, and
    the str-only ``'hex'`` codec. Iterating bytes yields ints on Python 3 and
    one-char strings on Python 2, so handle both.
    """
    value = 0
    for byte in base64.b64decode(encoded):
        value = (value << 8) | (byte if isinstance(byte, int) else ord(byte))
    return value


def rsa_encrypt_password(password, verify_code, check_n, check_e):
    """RSA-encrypt the login password for the Xunlei web API.

    :param password: account password (str or bytes)
    :param verify_code: captcha text, upper-cased before hashing
    :param check_n: base64-encoded RSA modulus from the server
    :param check_e: base64-encoded RSA public exponent from the server
    :return: base64 string of the RSA-encrypted md5(password)+verify_code
    """
    if not isinstance(password, bytes):
        password = password.encode('utf-8')  # hashlib needs bytes on Python 3
    digest = hashlib.md5()
    digest.update(password)
    password_hex = digest.hexdigest() + verify_code.upper()
    int_n = _b64_to_int(check_n)
    int_e = _b64_to_int(check_e)
    int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
    return hex2b64(format(int_encrypted_password, 'x'))
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | blocks2numList | python | def blocks2numList(blocks, n):
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList | inverse function of numList2blocks. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L239-L250 | null | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
    """Take a list of integers (each between 0 and 127) and combine them
    into base-256 blocks of n bytes each. When len(l) % n != 0, the list is
    padded with random printable junk, with a 0 marker right after the
    original message."""
    # ASCII printable range is 0x20 - 0x7E.
    padded = copy.copy(l)
    remainder = len(padded) % n
    if remainder != 0:
        # Pad with random printable bytes up to a multiple of n ...
        for _ in range(n - remainder):
            padded.append(random.randint(32, 126))
        # ... then mark the end of the real message with a 0 byte.
        padded[len(l)] = 0
    blocks = []
    for start in range(0, len(padded), n):
        # Fold n consecutive bytes into one integer, big-endian.
        value = 0
        for offset in range(n):
            value = (value << 8) + padded[start + offset]
        blocks.append(value)
    return blocks
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | encrypt | python | def encrypt(message, modN, e, blockSize):
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN) | given a string message, public keys and blockSize, encrypt using
RSA algorithms. | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L253-L260 | [
"def modExp(a, d, n):\n \"\"\"returns a ** d (mod n)\"\"\"\n assert d >= 0\n assert n >= 0\n base2D = int2baseTwo(d)\n base2DLength = len(base2D)\n modArray = []\n result = 1\n for i in range(1, base2DLength + 1):\n if i == 1:\n modArray.append(a % n)\n else:\n ... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rsa_lib.py | decrypt | python | def decrypt(secret, modN, d, blockSize):
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList) | reverse function of encrypt | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L263-L267 | [
"def numList2string(l):\n \"\"\"Converts a list of integers to a string based on ASCII values\"\"\"\n return pickle.loads(''.join(map(chr, l)))\n",
"def blocks2numList(blocks, n):\n \"\"\"inverse function of numList2blocks.\"\"\"\n toProcess = copy.copy(blocks)\n returnList = []\n for numBlock i... | #!/usr/bin/env python
import copy
import math
import pickle
import random
from itertools import combinations
import js2py
import hashlib
import base64
def euclid(a, b):
"""returns the Greatest Common Divisor of a and b"""
a = abs(a)
b = abs(b)
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
def coPrime(l):
"""returns 'True' if the values in the list L are all co-prime
otherwise, it returns 'False'. """
for i, j in combinations(l, 2):
if euclid(i, j) != 1:
return False
return True
def extendedEuclid(a, b):
"""return a tuple of three values: x, y and z, such that x is
the GCD of a and b, and x = y * a + z * b"""
if a == 0:
return b, 0, 1
else:
g, y, x = extendedEuclid(b % a, a)
return g, x - (b // a) * y, y
def modInv(a, m):
"""returns the multiplicative inverse of a in modulo m as a
positive value between zero and m-1"""
# notice that a and m need to co-prime to each other.
if coPrime([a, m]):
linearCombination = extendedEuclid(a, m)
return linearCombination[1] % m
else:
return 0
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
def int2baseTwo(x):
"""x is a positive integer. Convert it to base two as a list of integers
in reverse order as a list."""
# repeating x >>= 1 and x & 1 will do the trick
assert x >= 0
bitInverse = []
while x != 0:
bitInverse.append(x & 1)
x >>= 1
return bitInverse
def modExp(a, d, n):
"""returns a ** d (mod n)"""
assert d >= 0
assert n >= 0
base2D = int2baseTwo(d)
base2DLength = len(base2D)
modArray = []
result = 1
for i in range(1, base2DLength + 1):
if i == 1:
modArray.append(a % n)
else:
modArray.append((modArray[i - 2] ** 2) % n)
for i in range(0, base2DLength):
if base2D[i] == 1:
result *= base2D[i] * modArray[i]
return result % n
def millerRabin(n, k):
"""
Miller Rabin pseudo-prime test
return True means likely a prime, (how sure about that, depending on k)
return False means definitely a composite.
Raise assertion error when n, k are not positive integers
and n is not 1
"""
assert n >= 1
# ensure n is bigger than 1
assert k > 0
# ensure k is a positive integer so everything down here makes sense
if n == 2:
return True
# make sure to return True if n == 2
if n % 2 == 0:
return False
# immediately return False for all the even numbers bigger than 2
extract2 = extractTwos(n - 1)
s = extract2[0]
d = extract2[1]
assert 2 ** s * d == n - 1
def tryComposite(a):
"""Inner function which will inspect whether a given witness
will reveal the true identity of n. Will only be called within
millerRabin"""
x = modExp(a, d, n)
if x == 1 or x == n - 1:
return None
else:
for j in range(1, s):
x = modExp(x, 2, n)
if x == 1:
return False
elif x == n - 1:
return None
return False
for i in range(0, k):
a = random.randint(2, n - 2)
if tryComposite(a) == False:
return False
return True # actually, we should return probably true.
def primeSieve(k):
"""return a list with length k + 1, showing if list[i] == 1, i is a prime
else if list[i] == 0, i is a composite, if list[i] == -1, not defined"""
def isPrime(n):
"""return True is given number n is absolutely prime,
return False is otherwise."""
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
result = [-1] * (k + 1)
for i in range(2, int(k + 1)):
if isPrime(i):
result[i] = 1
else:
result[i] = 0
return result
def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError
def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d)
def string2numList(strn):
"""Converts a string to a list of integers based on ASCII values"""
"""origin pickle has bug """
return [ord(chars) for chars in list(strn)]
def numList2string(l):
"""Converts a list of integers to a string based on ASCII values"""
return pickle.loads(''.join(map(chr, l)))
def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList
def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList
def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN)
def block_size(val):
v = int(val)
assert(v >= 10 and v <= 1000)
return val
# Use xunlei's base64 js code directly
hex2b64 = js2py.eval_js(
'''
function hex2b64(h) {
var b64map="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var b64padchar="=";
var i;
var c;
var ret = "";
for(i = 0; i+3 <= h.length; i+=3) {
c = parseInt(h.substring(i,i+3),16);
ret += b64map.charAt(c >> 6) + b64map.charAt(c & 63);
}
if(i+1 == h.length) {
c = parseInt(h.substring(i,i+1),16);
ret += b64map.charAt(c << 2);
}
else if(i+2 == h.length) {
c = parseInt(h.substring(i,i+2),16);
ret += b64map.charAt(c >> 2) + b64map.charAt((c & 3) << 4);
}
while((ret.length & 3) > 0) ret += b64padchar;
return ret;
}
'''
)
def rsa_encrypt_password(password, verify_code, check_n, check_e):
md5 = hashlib.md5()
md5.update(password)
password_hex = md5.hexdigest()
password_hex += verify_code.upper()
int_n = long(base64.decodestring(check_n).encode('hex'), 16)
int_e = long(base64.decodestring(check_e).encode('hex'), 16)
int_encrypted_password = encrypt(password_hex, int_n, int_e, 128)
encrypted_password = hex2b64(format(int_encrypted_password, 'x'))
return encrypted_password
|
lazygunner/xunleipy | xunleipy/rk.py | RClient.rk_create | python | def rk_create(self, im, im_type, timeout=60):
params = {
'typeid': im_type,
'timeout': timeout,
}
params.update(self.base_params)
files = {'image': ('check_code.png', im)}
r = requests.post('http://api.ruokuai.com/create.json', data=params, files=files, headers=self.headers)
return r.json() | im: 图片字节
im_type: 题目类型 | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rk.py#L32-L44 | null | class RClient(object):
def __init__(self,
             username,
             password,
             soft_id='37093',
             soft_key='72e005a3a3f2480c900c32a2ac2660d5'):
    """Client for the ruokuai.com captcha-solving HTTP API.

    :param username: ruokuai account name
    :param password: ruokuai account password (hashed before sending)
    :param soft_id: registered software id for the API (default is this
        project's id)
    :param soft_key: software key paired with soft_id
    """
    self.username = username
    # The API takes the MD5 hex digest of the password, never the clear text.
    # NOTE(review): assumes `md5` (hashlib.md5) is imported at file top and
    # that `password` is a byte string -- Python 3 would need .encode().
    self.password = md5(password).hexdigest()
    self.soft_id = soft_id
    self.soft_key = soft_key
    # Credentials sent with every API call; endpoint-specific fields are
    # merged in by the individual methods.
    self.base_params = {
        'username': self.username,
        'password': self.password,
        'softid': self.soft_id,
        'softkey': self.soft_key,
    }
    self.headers = {
        'Connection': 'Keep-Alive',
        'Expect': '100-continue',
        'User-Agent': 'ben',
    }
def rk_report_error(self, im_id):
    """Report a wrongly solved captcha back to ruokuai.

    im_id: id of the answered problem being reported
    Returns the decoded JSON response from the API.
    """
    payload = dict(self.base_params)
    payload['id'] = im_id
    response = requests.post('http://api.ruokuai.com/reporterror.json', data=payload, headers=self.headers)
    return response.json()
|
lazygunner/xunleipy | xunleipy/rk.py | RClient.rk_report_error | python | def rk_report_error(self, im_id):
params = {
'id': im_id,
}
params.update(self.base_params)
r = requests.post('http://api.ruokuai.com/reporterror.json', data=params, headers=self.headers)
return r.json() | im_id:报错题目的ID | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rk.py#L46-L55 | null | class RClient(object):
def __init__(self,
username,
password,
soft_id='37093',
soft_key='72e005a3a3f2480c900c32a2ac2660d5'):
self.username = username
self.password = md5(password).hexdigest()
self.soft_id = soft_id
self.soft_key = soft_key
self.base_params = {
'username': self.username,
'password': self.password,
'softid': self.soft_id,
'softkey': self.soft_key,
}
self.headers = {
'Connection': 'Keep-Alive',
'Expect': '100-continue',
'User-Agent': 'ben',
}
def rk_create(self, im, im_type, timeout=60):
    """Submit a captcha image to ruokuai for solving.

    im: raw image bytes
    im_type: ruokuai problem-type id
    timeout: seconds the server may spend on the image
    Returns the decoded JSON response from the API.
    """
    payload = {'typeid': im_type, 'timeout': timeout}
    payload.update(self.base_params)
    files = {'image': ('check_code.png', im)}
    response = requests.post('http://api.ruokuai.com/create.json', data=payload, files=files, headers=self.headers)
    return response.json()
|
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.get_remote_peer_list | python | def get_remote_peer_list(self):
'''
listPeer 返回列表
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
}
'''
params = {
'type': 0,
'v': DEFAULT_V,
'ct': 2
}
res = self._get('listPeer', params=params)
return res['peerList'] | listPeer 返回列表
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
} | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L78-L108 | [
"def _get(self, url, **kwargs):\n return self._request(\n method='get',\n url=url,\n **kwargs\n )\n"
] | class XunLeiRemote(XunLei):
def __init__(self,
username,
password,
rk_username=None,
rk_password=None,
proxy=None):
super(XunLeiRemote, self).__init__(
username, password, rk_username, rk_password, proxy=proxy
)
self.pid = ''
def _request(self, method, url, **kwargs):
url = REMOTE_BASE_URL + url
if 'params' not in kwargs:
kwargs['params'] = {}
if method == 'post':
if 'data' not in kwargs:
kwargs['data'] = {}
if isinstance(kwargs['data'], dict):
data = json.dumps(kwargs['data'], ensure_ascii=False)
data = data.encode('utf-8')
kwargs['data'] = data
result = self.session.request(
method=method,
url=url,
**kwargs
)
result.raise_for_status()
data = result.json()
if data['rtn'] != 0:
print('request for %s failed, code:%s', url, data['rtn'])
return data
def _get(self, url, **kwargs):
return self._request(
method='get',
url=url,
**kwargs
)
def _post(self, url, **kwargs):
return self._request(
method='post',
url=url,
**kwargs
)
def get_default_task_list(self):
peer_list = self.get_remote_peer_list()
if len(peer_list) == 0:
return []
default_peer = peer_list[0]
self.pid = default_peer['pid']
return self.get_remote_task_list(self.pid)
def get_remote_task_list(
self, peer_id, list_type=ListType.downloading, pos=0, number=10):
'''
list 返回列表
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
}
'''
params = {
'pid': peer_id,
'type': list_type,
'pos': pos,
'number': number,
'needUrl': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('list', params=params)
return res['tasks']
def check_url(self, pid, url_list):
'''
urlCheck 返回数据
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
}
'''
task_list = []
for url in url_list:
params = {
'pid': pid,
'url': url,
'type': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('urlCheck', params=params)
if res['rtn'] == 0:
task_info = res['taskInfo']
task_list.append({
'url': task_info['url'],
'name': task_info['name'],
'filesize': task_info['size'],
'gcid': '',
'cid': ''
})
else:
print(
'url [%s] check failed, code:%s.',
url,
task_info['failCode']
)
return task_list
def add_urls_to_remote(self, pid, path='C:/TDDOWNLOAD/', url_list=[]):
task_list = []
for url in url_list:
task = resolve_url(url)
if task == {}:
logger.info('Invalid URL:%s', url)
continue
else:
task_list.append(task)
return self.add_tasks_to_remote(pid, path, task_list)
def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res
def delete_tasks_by_task_infos(self, pid, task_infos, recycle=True,
del_file=True):
if len(task_infos) == 0:
return []
del_tasks = []
for t in task_infos:
del_tasks.append(t['id'] + '_' + str(t['state']))
del_tasks_string = ','.join(del_tasks)
params = {
'pid': pid,
'tasks': del_tasks_string,
'recycleTask': 1 if recycle else 0,
'deleteFile': 'true' if del_file else 'false'
}
res = self._get('del', params=params)
return res
def delete_all_tasks_in_recycle(self, pid):
params = {
'pid': pid,
'tasks': '-1_64',
'recycleTask': 0,
'deleteFile': 'true'
}
res = self._get('del', params=params)
return res
|
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.get_remote_task_list | python | def get_remote_task_list(
self, peer_id, list_type=ListType.downloading, pos=0, number=10):
'''
list 返回列表
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
}
'''
params = {
'pid': peer_id,
'type': list_type,
'pos': pos,
'number': number,
'needUrl': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('list', params=params)
return res['tasks'] | list 返回列表
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
} | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L118-L176 | [
"def _get(self, url, **kwargs):\n return self._request(\n method='get',\n url=url,\n **kwargs\n )\n"
] | class XunLeiRemote(XunLei):
def __init__(self,
username,
password,
rk_username=None,
rk_password=None,
proxy=None):
super(XunLeiRemote, self).__init__(
username, password, rk_username, rk_password, proxy=proxy
)
self.pid = ''
def _request(self, method, url, **kwargs):
url = REMOTE_BASE_URL + url
if 'params' not in kwargs:
kwargs['params'] = {}
if method == 'post':
if 'data' not in kwargs:
kwargs['data'] = {}
if isinstance(kwargs['data'], dict):
data = json.dumps(kwargs['data'], ensure_ascii=False)
data = data.encode('utf-8')
kwargs['data'] = data
result = self.session.request(
method=method,
url=url,
**kwargs
)
result.raise_for_status()
data = result.json()
if data['rtn'] != 0:
print('request for %s failed, code:%s', url, data['rtn'])
return data
def _get(self, url, **kwargs):
return self._request(
method='get',
url=url,
**kwargs
)
def _post(self, url, **kwargs):
return self._request(
method='post',
url=url,
**kwargs
)
def get_remote_peer_list(self):
'''
listPeer 返回列表
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
}
'''
params = {
'type': 0,
'v': DEFAULT_V,
'ct': 2
}
res = self._get('listPeer', params=params)
return res['peerList']
def get_default_task_list(self):
peer_list = self.get_remote_peer_list()
if len(peer_list) == 0:
return []
default_peer = peer_list[0]
self.pid = default_peer['pid']
return self.get_remote_task_list(self.pid)
def check_url(self, pid, url_list):
'''
urlCheck 返回数据
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
}
'''
task_list = []
for url in url_list:
params = {
'pid': pid,
'url': url,
'type': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('urlCheck', params=params)
if res['rtn'] == 0:
task_info = res['taskInfo']
task_list.append({
'url': task_info['url'],
'name': task_info['name'],
'filesize': task_info['size'],
'gcid': '',
'cid': ''
})
else:
print(
'url [%s] check failed, code:%s.',
url,
task_info['failCode']
)
return task_list
def add_urls_to_remote(self, pid, path='C:/TDDOWNLOAD/', url_list=[]):
task_list = []
for url in url_list:
task = resolve_url(url)
if task == {}:
logger.info('Invalid URL:%s', url)
continue
else:
task_list.append(task)
return self.add_tasks_to_remote(pid, path, task_list)
def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res
def delete_tasks_by_task_infos(self, pid, task_infos, recycle=True,
del_file=True):
if len(task_infos) == 0:
return []
del_tasks = []
for t in task_infos:
del_tasks.append(t['id'] + '_' + str(t['state']))
del_tasks_string = ','.join(del_tasks)
params = {
'pid': pid,
'tasks': del_tasks_string,
'recycleTask': 1 if recycle else 0,
'deleteFile': 'true' if del_file else 'false'
}
res = self._get('del', params=params)
return res
def delete_all_tasks_in_recycle(self, pid):
params = {
'pid': pid,
'tasks': '-1_64',
'recycleTask': 0,
'deleteFile': 'true'
}
res = self._get('del', params=params)
return res
|
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.check_url | python | def check_url(self, pid, url_list):
'''
urlCheck 返回数据
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
}
'''
task_list = []
for url in url_list:
params = {
'pid': pid,
'url': url,
'type': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('urlCheck', params=params)
if res['rtn'] == 0:
task_info = res['taskInfo']
task_list.append({
'url': task_info['url'],
'name': task_info['name'],
'filesize': task_info['size'],
'gcid': '',
'cid': ''
})
else:
print(
'url [%s] check failed, code:%s.',
url,
task_info['failCode']
)
return task_list | urlCheck 返回数据
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
} | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L178-L220 | [
"def _get(self, url, **kwargs):\n return self._request(\n method='get',\n url=url,\n **kwargs\n )\n"
] | class XunLeiRemote(XunLei):
def __init__(self,
username,
password,
rk_username=None,
rk_password=None,
proxy=None):
super(XunLeiRemote, self).__init__(
username, password, rk_username, rk_password, proxy=proxy
)
self.pid = ''
def _request(self, method, url, **kwargs):
url = REMOTE_BASE_URL + url
if 'params' not in kwargs:
kwargs['params'] = {}
if method == 'post':
if 'data' not in kwargs:
kwargs['data'] = {}
if isinstance(kwargs['data'], dict):
data = json.dumps(kwargs['data'], ensure_ascii=False)
data = data.encode('utf-8')
kwargs['data'] = data
result = self.session.request(
method=method,
url=url,
**kwargs
)
result.raise_for_status()
data = result.json()
if data['rtn'] != 0:
print('request for %s failed, code:%s', url, data['rtn'])
return data
def _get(self, url, **kwargs):
return self._request(
method='get',
url=url,
**kwargs
)
def _post(self, url, **kwargs):
return self._request(
method='post',
url=url,
**kwargs
)
def get_remote_peer_list(self):
'''
listPeer 返回列表
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
}
'''
params = {
'type': 0,
'v': DEFAULT_V,
'ct': 2
}
res = self._get('listPeer', params=params)
return res['peerList']
def get_default_task_list(self):
peer_list = self.get_remote_peer_list()
if len(peer_list) == 0:
return []
default_peer = peer_list[0]
self.pid = default_peer['pid']
return self.get_remote_task_list(self.pid)
def get_remote_task_list(
self, peer_id, list_type=ListType.downloading, pos=0, number=10):
'''
list 返回列表
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
}
'''
params = {
'pid': peer_id,
'type': list_type,
'pos': pos,
'number': number,
'needUrl': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('list', params=params)
return res['tasks']
def add_urls_to_remote(self, pid, path='C:/TDDOWNLOAD/', url_list=[]):
task_list = []
for url in url_list:
task = resolve_url(url)
if task == {}:
logger.info('Invalid URL:%s', url)
continue
else:
task_list.append(task)
return self.add_tasks_to_remote(pid, path, task_list)
def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res
def delete_tasks_by_task_infos(self, pid, task_infos, recycle=True,
del_file=True):
if len(task_infos) == 0:
return []
del_tasks = []
for t in task_infos:
del_tasks.append(t['id'] + '_' + str(t['state']))
del_tasks_string = ','.join(del_tasks)
params = {
'pid': pid,
'tasks': del_tasks_string,
'recycleTask': 1 if recycle else 0,
'deleteFile': 'true' if del_file else 'false'
}
res = self._get('del', params=params)
return res
def delete_all_tasks_in_recycle(self, pid):
params = {
'pid': pid,
'tasks': '-1_64',
'recycleTask': 0,
'deleteFile': 'true'
}
res = self._get('del', params=params)
return res
|
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.add_tasks_to_remote | python | def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res | post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
} | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L233-L290 | [
"def _post(self, url, **kwargs):\n return self._request(\n method='post',\n url=url,\n **kwargs\n )\n"
] | class XunLeiRemote(XunLei):
def __init__(self,
username,
password,
rk_username=None,
rk_password=None,
proxy=None):
super(XunLeiRemote, self).__init__(
username, password, rk_username, rk_password, proxy=proxy
)
self.pid = ''
def _request(self, method, url, **kwargs):
url = REMOTE_BASE_URL + url
if 'params' not in kwargs:
kwargs['params'] = {}
if method == 'post':
if 'data' not in kwargs:
kwargs['data'] = {}
if isinstance(kwargs['data'], dict):
data = json.dumps(kwargs['data'], ensure_ascii=False)
data = data.encode('utf-8')
kwargs['data'] = data
result = self.session.request(
method=method,
url=url,
**kwargs
)
result.raise_for_status()
data = result.json()
if data['rtn'] != 0:
print('request for %s failed, code:%s', url, data['rtn'])
return data
def _get(self, url, **kwargs):
return self._request(
method='get',
url=url,
**kwargs
)
def _post(self, url, **kwargs):
return self._request(
method='post',
url=url,
**kwargs
)
def get_remote_peer_list(self):
'''
listPeer 返回列表
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
}
'''
params = {
'type': 0,
'v': DEFAULT_V,
'ct': 2
}
res = self._get('listPeer', params=params)
return res['peerList']
def get_default_task_list(self):
peer_list = self.get_remote_peer_list()
if len(peer_list) == 0:
return []
default_peer = peer_list[0]
self.pid = default_peer['pid']
return self.get_remote_task_list(self.pid)
def get_remote_task_list(
self, peer_id, list_type=ListType.downloading, pos=0, number=10):
'''
list 返回列表
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
}
'''
params = {
'pid': peer_id,
'type': list_type,
'pos': pos,
'number': number,
'needUrl': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('list', params=params)
return res['tasks']
def check_url(self, pid, url_list):
'''
urlCheck 返回数据
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
}
'''
task_list = []
for url in url_list:
params = {
'pid': pid,
'url': url,
'type': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('urlCheck', params=params)
if res['rtn'] == 0:
task_info = res['taskInfo']
task_list.append({
'url': task_info['url'],
'name': task_info['name'],
'filesize': task_info['size'],
'gcid': '',
'cid': ''
})
else:
print(
'url [%s] check failed, code:%s.',
url,
task_info['failCode']
)
return task_list
def add_urls_to_remote(self, pid, path='C:/TDDOWNLOAD/', url_list=[]):
task_list = []
for url in url_list:
task = resolve_url(url)
if task == {}:
logger.info('Invalid URL:%s', url)
continue
else:
task_list.append(task)
return self.add_tasks_to_remote(pid, path, task_list)
def delete_tasks_by_task_infos(self, pid, task_infos, recycle=True,
del_file=True):
if len(task_infos) == 0:
return []
del_tasks = []
for t in task_infos:
del_tasks.append(t['id'] + '_' + str(t['state']))
del_tasks_string = ','.join(del_tasks)
params = {
'pid': pid,
'tasks': del_tasks_string,
'recycleTask': 1 if recycle else 0,
'deleteFile': 'true' if del_file else 'false'
}
res = self._get('del', params=params)
return res
def delete_all_tasks_in_recycle(self, pid):
params = {
'pid': pid,
'tasks': '-1_64',
'recycleTask': 0,
'deleteFile': 'true'
}
res = self._get('del', params=params)
return res
|
lazygunner/xunleipy | xunleipy/fp.py | _get_random_fp_raw | python | def _get_random_fp_raw():
'''
生成随机的原始指纹列表
'''
fp_list = []
fp_list.append(get_random_ua()) # ua
fp_list.append('zh-CN') # language
fp_list.append('24') # color depth
fp_list.append(__get_random_screen_resolution())
fp_list.append('-480') # time zone offsite
fp_list.append('true') # session storage
fp_list.append('true') # local storage
fp_list.append('true') # indexed db
fp_list.append('') # add behavior
fp_list.append('function') # open database
fp_list.append('') # cpu class
fp_list.append('MacIntel') # platform
fp_list.append('') # do not track
fp_list.append(
'Widevine Content Decryption Module::Enables Widevine \
licenses for playback of HTML audio/video content. \
(version: 1.4.8.962)::application/x-ppapi-widevine-cdm~;'
) # plugin string
return fp_list | 生成随机的原始指纹列表 | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/fp.py#L23-L47 | [
"def get_random_ua():\n import random\n user_agent_list = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n ... | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import random
import base64
import time
import six
import requests
import js2py
from .utils import get_random_ua
def __get_random_screen_resolution():
return random.choice([
'1080*1920',
'960*1620',
'800*600',
'540*720',
])
def get_fp_raw():
'''
生成fp_raw_str
'''
fp_file_path = os.path.expanduser('~/.xunleipy_fp')
fp_list = []
try:
with open(fp_file_path, 'r') as fp_file:
fp_str = fp_file.readline()
if len(fp_str) > 0:
fp_list = fp_str.split('###')
except IOError:
pass
if len(fp_list) < 14:
fp_list = _get_random_fp_raw()
fp_str = '###'.join(fp_list)
with open(fp_file_path, 'w') as fp_file:
fp_file.write(fp_str)
source = fp_str.strip()
if six.PY3:
source = source.encode('utf-8')
fp_raw = base64.b64encode(source)
return fp_raw
def get_fp_sign(fp_raw):
rsp = requests.get(
'https://login.xunlei.com/risk?cmd=algorithm&t=' +
str(time.time() * 1000)
)
sign = ''
try:
xl_al = js2py.eval_js(rsp.content)
sign = xl_al(fp_raw)
except Exception as e:
print(e)
return sign
|
lazygunner/xunleipy | xunleipy/fp.py | get_fp_raw | python | def get_fp_raw():
'''
生成fp_raw_str
'''
fp_file_path = os.path.expanduser('~/.xunleipy_fp')
fp_list = []
try:
with open(fp_file_path, 'r') as fp_file:
fp_str = fp_file.readline()
if len(fp_str) > 0:
fp_list = fp_str.split('###')
except IOError:
pass
if len(fp_list) < 14:
fp_list = _get_random_fp_raw()
fp_str = '###'.join(fp_list)
with open(fp_file_path, 'w') as fp_file:
fp_file.write(fp_str)
source = fp_str.strip()
if six.PY3:
source = source.encode('utf-8')
fp_raw = base64.b64encode(source)
return fp_raw | 生成fp_raw_str | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/fp.py#L50-L75 | [
"def _get_random_fp_raw():\n '''\n 生成随机的原始指纹列表\n '''\n fp_list = []\n fp_list.append(get_random_ua()) # ua\n fp_list.append('zh-CN') # language\n fp_list.append('24') # color depth\n fp_list.append(__get_random_screen_resolution())\n fp_list.append('-480') # time zone offsite\n ... | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import random
import base64
import time
import six
import requests
import js2py
from .utils import get_random_ua
def __get_random_screen_resolution():
return random.choice([
'1080*1920',
'960*1620',
'800*600',
'540*720',
])
def _get_random_fp_raw():
'''
生成随机的原始指纹列表
'''
fp_list = []
fp_list.append(get_random_ua()) # ua
fp_list.append('zh-CN') # language
fp_list.append('24') # color depth
fp_list.append(__get_random_screen_resolution())
fp_list.append('-480') # time zone offsite
fp_list.append('true') # session storage
fp_list.append('true') # local storage
fp_list.append('true') # indexed db
fp_list.append('') # add behavior
fp_list.append('function') # open database
fp_list.append('') # cpu class
fp_list.append('MacIntel') # platform
fp_list.append('') # do not track
fp_list.append(
'Widevine Content Decryption Module::Enables Widevine \
licenses for playback of HTML audio/video content. \
(version: 1.4.8.962)::application/x-ppapi-widevine-cdm~;'
) # plugin string
return fp_list
def get_fp_sign(fp_raw):
rsp = requests.get(
'https://login.xunlei.com/risk?cmd=algorithm&t=' +
str(time.time() * 1000)
)
sign = ''
try:
xl_al = js2py.eval_js(rsp.content)
sign = xl_al(fp_raw)
except Exception as e:
print(e)
return sign
|
appknox/pyaxmlparser | pyaxmlparser/stringblock.py | StringBlock._decode8 | python | def _decode8(self, offset):
# UTF-8 Strings contain two lengths, as they might differ:
# 1) the UTF-16 length
str_len, skip = self._decode_length(offset, 1)
offset += skip
# 2) the utf-8 string length
encoded_bytes, skip = self._decode_length(offset, 1)
offset += skip
data = self.m_charbuff[offset: offset + encoded_bytes]
assert self.m_charbuff[offset + encoded_bytes] == 0, \
"UTF-8 String is not null terminated! At offset={}".format(offset)
return self._decode_bytes(data, 'utf-8', str_len) | Decode an UTF-8 String at the given offset
:param offset: offset of the string inside the data
:return: str | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/stringblock.py#L150-L171 | [
"def _decode_bytes(data, encoding, str_len):\n \"\"\"\n Generic decoding with length check.\n The string is decoded from bytes with the given encoding, then the length\n of the string is checked.\n The string is decoded using the \"replace\" method.\n\n :param data: bytes\n :param encoding: enc... | class StringBlock(object):
"""
StringBlock is a CHUNK inside an AXML File
It contains all strings, which are used by referecing to ID's
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#436
"""
def __init__(self, buff, header):
"""
:param buff: buffer which holds the string block
:param header: a instance of :class:`~ARSCHeader`
"""
self._cache = {}
self.header = header
# We already read the header (which was chunk_type and chunk_size
# Now, we read the string_count:
self.stringCount = unpack('<I', buff.read(4))[0]
# style_count
self.styleCount = unpack('<I', buff.read(4))[0]
# flags
self.flags = unpack('<I', buff.read(4))[0]
self.m_isUTF8 = ((self.flags & const.UTF8_FLAG) != 0)
# string_pool_offset
# The string offset is counted from the beginning of the string section
self.stringsOffset = unpack('<I', buff.read(4))[0]
# style_pool_offset
# The styles offset is counted as well from the beginning of the string section
self.stylesOffset = unpack('<I', buff.read(4))[0]
# Check if they supplied a stylesOffset even if the count is 0:
if self.styleCount == 0 and self.stylesOffset > 0:
log.info("Styles Offset given, but styleCount is zero. "
"This is not a problem but could indicate packers.")
self.m_stringOffsets = []
self.m_styleOffsets = []
self.m_charbuff = ""
self.m_styles = []
# Next, there is a list of string following.
# This is only a list of offsets (4 byte each)
for i in range(self.stringCount):
self.m_stringOffsets.append(unpack('<I', buff.read(4))[0])
# And a list of styles
# again, a list of offsets
for i in range(self.styleCount):
self.m_styleOffsets.append(unpack('<I', buff.read(4))[0])
# FIXME it is probably better to parse n strings and not calculate the size
size = self.header.size - self.stringsOffset
# if there are styles as well, we do not want to read them too.
# Only read them, if no
if self.stylesOffset != 0 and self.styleCount != 0:
size = self.stylesOffset - self.stringsOffset
if (size % 4) != 0:
log.warning("Size of strings is not aligned by four bytes.")
self.m_charbuff = buff.read(size)
if self.stylesOffset != 0 and self.styleCount != 0:
size = self.header.size - self.stylesOffset
if (size % 4) != 0:
log.warning("Size of styles is not aligned by four bytes.")
for i in range(0, size // 4):
self.m_styles.append(unpack('<I', buff.read(4))[0])
def __getitem__(self, idx):
"""
Returns the string at the index in the string table
"""
return self.getString(idx)
def __len__(self):
"""
Get the number of strings stored in this table
"""
return self.stringCount
def __iter__(self):
"""
Iterable over all strings
"""
for i in range(self.stringCount):
yield self.getString(i)
def getString(self, idx):
"""
Return the string at the index in the string table
:param idx: index in the string table
:return: str
"""
if idx in self._cache:
return self._cache[idx]
if idx < 0 or not self.m_stringOffsets or idx > self.stringCount:
return ""
offset = self.m_stringOffsets[idx]
if self.m_isUTF8:
self._cache[idx] = self._decode8(offset)
else:
self._cache[idx] = self._decode16(offset)
return self._cache[idx]
def getStyle(self, idx):
"""
Return the style associated with the index
:param idx: index of the style
:return:
"""
return self.m_styles[idx]
def _decode16(self, offset):
"""
Decode an UTF-16 String at the given offset
:param offset: offset of the string inside the data
:return: str
"""
str_len, skip = self._decode_length(offset, 2)
offset += skip
# The len is the string len in utf-16 units
encoded_bytes = str_len * 2
data = self.m_charbuff[offset: offset + encoded_bytes]
assert self.m_charbuff[offset + encoded_bytes:offset + encoded_bytes + 2] == b"\x00\x00", \
"UTF-16 String is not null terminated! At offset={}".format(offset)
return self._decode_bytes(data, 'utf-16', str_len)
@staticmethod
def _decode_bytes(data, encoding, str_len):
"""
Generic decoding with length check.
The string is decoded from bytes with the given encoding, then the length
of the string is checked.
The string is decoded using the "replace" method.
:param data: bytes
:param encoding: encoding name ("utf-8" or "utf-16")
:param str_len: length of the decoded string
:return: str
"""
string = data.decode(encoding, 'replace')
if len(string) != str_len:
log.warning("invalid decoded string length")
return string
def _decode_length(self, offset, sizeof_char):
"""
Generic Length Decoding at offset of string
The method works for both 8 and 16 bit Strings.
Length checks are enforced:
* 8 bit strings: maximum of 0x7FFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#692)
* 16 bit strings: maximum of 0x7FFFFFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#670)
:param offset: offset into the string data section of the beginning of
the string
:param sizeof_char: number of bytes per char (1 = 8bit, 2 = 16bit)
:returns: tuple of (length, read bytes)
"""
sizeof_2chars = sizeof_char << 1
fmt = "<2{}".format('B' if sizeof_char == 1 else 'H')
highbit = 0x80 << (8 * (sizeof_char - 1))
length1, length2 = unpack(fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
if (length1 & highbit) != 0:
length = ((length1 & ~highbit) << (8 * sizeof_char)) | length2
size = sizeof_2chars
else:
length = length1
size = sizeof_char
if sizeof_char == 1:
assert length <= 0x7FFF, "length of UTF-8 string is too large! At offset={}".format(offset)
else:
assert length <= 0x7FFFFFFF, "length of UTF-16 string is too large! At offset={}".format(offset)
return length, size
def show(self):
    """
    Print some information on stdout about the string table
    """
    # Header line mirrors the chunk fields parsed in __init__.
    print("StringBlock(stringsCount=0x%x, "
          "stringsOffset=0x%x, "
          "stylesCount=0x%x, "
          "stylesOffset=0x%x, "
          "flags=0x%x"
          ")" % (self.stringCount,
                 self.stringsOffset,
                 self.styleCount,
                 self.stylesOffset,
                 self.flags))

    if self.stringCount > 0:
        print()
        print("String Table: ")
        # Iterating over self decodes each string lazily via getString().
        for i, s in enumerate(self):
            print("{:08d} {}".format(i, repr(s)))

    if self.styleCount > 0:
        print()
        print("Styles Table: ")
        # Style entries are raw uint32 values; getStyle() does no decoding.
        for i in range(self.styleCount):
            print("{:08d} {}".format(i, repr(self.getStyle(i))))
|
appknox/pyaxmlparser | pyaxmlparser/stringblock.py | StringBlock._decode16 | python | def _decode16(self, offset):
str_len, skip = self._decode_length(offset, 2)
offset += skip
# The len is the string len in utf-16 units
encoded_bytes = str_len * 2
data = self.m_charbuff[offset: offset + encoded_bytes]
assert self.m_charbuff[offset + encoded_bytes:offset + encoded_bytes + 2] == b"\x00\x00", \
"UTF-16 String is not null terminated! At offset={}".format(offset)
return self._decode_bytes(data, 'utf-16', str_len) | Decode an UTF-16 String at the given offset
:param offset: offset of the string inside the data
:return: str | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/stringblock.py#L173-L191 | [
"def _decode_bytes(data, encoding, str_len):\n \"\"\"\n Generic decoding with length check.\n The string is decoded from bytes with the given encoding, then the length\n of the string is checked.\n The string is decoded using the \"replace\" method.\n\n :param data: bytes\n :param encoding: enc... | class StringBlock(object):
"""
StringBlock is a CHUNK inside an AXML File
It contains all strings, which are used by referecing to ID's
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#436
"""
def __init__(self, buff, header):
    """
    Parse a string pool chunk from *buff*.

    :param buff: buffer which holds the string block
    :param header: an instance of :class:`~ARSCHeader`
    """
    # idx -> decoded str; filled lazily by getString()
    self._cache = {}
    self.header = header
    # We already read the header (which was chunk_type and chunk_size
    # Now, we read the string_count:
    self.stringCount = unpack('<I', buff.read(4))[0]
    # style_count
    self.styleCount = unpack('<I', buff.read(4))[0]

    # flags
    self.flags = unpack('<I', buff.read(4))[0]
    self.m_isUTF8 = ((self.flags & const.UTF8_FLAG) != 0)

    # string_pool_offset
    # The string offset is counted from the beginning of the string section
    self.stringsOffset = unpack('<I', buff.read(4))[0]
    # style_pool_offset
    # The styles offset is counted as well from the beginning of the string section
    self.stylesOffset = unpack('<I', buff.read(4))[0]

    # Check if they supplied a stylesOffset even if the count is 0:
    if self.styleCount == 0 and self.stylesOffset > 0:
        log.info("Styles Offset given, but styleCount is zero. "
                 "This is not a problem but could indicate packers.")

    self.m_stringOffsets = []
    self.m_styleOffsets = []
    self.m_charbuff = ""
    self.m_styles = []

    # Next, there is a list of string following.
    # This is only a list of offsets (4 byte each)
    for i in range(self.stringCount):
        self.m_stringOffsets.append(unpack('<I', buff.read(4))[0])

    # And a list of styles
    # again, a list of offsets
    for i in range(self.styleCount):
        self.m_styleOffsets.append(unpack('<I', buff.read(4))[0])

    # FIXME it is probably better to parse n strings and not calculate the size
    size = self.header.size - self.stringsOffset

    # if there are styles as well, we do not want to read them too.
    # Only read them, if no
    if self.stylesOffset != 0 and self.styleCount != 0:
        size = self.stylesOffset - self.stringsOffset

    if (size % 4) != 0:
        log.warning("Size of strings is not aligned by four bytes.")

    self.m_charbuff = buff.read(size)

    if self.stylesOffset != 0 and self.styleCount != 0:
        size = self.header.size - self.stylesOffset

        if (size % 4) != 0:
            log.warning("Size of styles is not aligned by four bytes.")

        # Styles are stored as raw uint32 entries after the string data.
        for i in range(0, size // 4):
            self.m_styles.append(unpack('<I', buff.read(4))[0])
def __getitem__(self, idx):
    """
    Returns the string at the index in the string table

    :param idx: index in the string table
    :return: str -- delegates to :meth:`getString`
    """
    return self.getString(idx)
def __len__(self):
    """
    Get the number of strings stored in this table

    :return: int -- the ``stringCount`` field read from the chunk header
    """
    return self.stringCount
def __iter__(self):
    """
    Iterable over all strings

    Yields each string in table order; decoding goes through
    :meth:`getString` and is therefore cached.
    """
    for i in range(self.stringCount):
        yield self.getString(i)
def getString(self, idx):
    """
    Return the string at the index in the string table

    Strings are decoded lazily and memoized in ``self._cache``.
    Out-of-range indices return the empty string instead of raising.

    :param idx: index in the string table
    :return: str
    """
    if idx in self._cache:
        return self._cache[idx]

    # Valid indices are 0 <= idx < stringCount.  The previous bound
    # (idx > self.stringCount) let idx == stringCount slip through and
    # raise an IndexError on the offset lookup below.
    if idx < 0 or not self.m_stringOffsets or idx >= self.stringCount:
        return ""

    offset = self.m_stringOffsets[idx]

    if self.m_isUTF8:
        self._cache[idx] = self._decode8(offset)
    else:
        self._cache[idx] = self._decode16(offset)

    return self._cache[idx]
def getStyle(self, idx):
    """
    Return the style associated with the index

    :param idx: index of the style
    :return: int -- the raw uint32 style entry (no bounds check: an
        out-of-range index raises IndexError)
    """
    return self.m_styles[idx]
def _decode8(self, offset):
    """
    Decode an UTF-8 String at the given offset

    :param offset: offset of the string inside the data
    :return: str
    """
    # UTF-8 Strings contain two lengths, as they might differ:
    # 1) the UTF-16 length
    str_len, skip = self._decode_length(offset, 1)
    offset += skip

    # 2) the utf-8 string length
    encoded_bytes, skip = self._decode_length(offset, 1)
    offset += skip

    data = self.m_charbuff[offset: offset + encoded_bytes]

    # NOTE(review): indexing bytes yields an int on Python 3, so the
    # comparison with 0 checks for the single NULL terminator byte.
    assert self.m_charbuff[offset + encoded_bytes] == 0, \
        "UTF-8 String is not null terminated! At offset={}".format(offset)

    return self._decode_bytes(data, 'utf-8', str_len)
@staticmethod
def _decode_bytes(data, encoding, str_len):
"""
Generic decoding with length check.
The string is decoded from bytes with the given encoding, then the length
of the string is checked.
The string is decoded using the "replace" method.
:param data: bytes
:param encoding: encoding name ("utf-8" or "utf-16")
:param str_len: length of the decoded string
:return: str
"""
string = data.decode(encoding, 'replace')
if len(string) != str_len:
log.warning("invalid decoded string length")
return string
def _decode_length(self, offset, sizeof_char):
"""
Generic Length Decoding at offset of string
The method works for both 8 and 16 bit Strings.
Length checks are enforced:
* 8 bit strings: maximum of 0x7FFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#692)
* 16 bit strings: maximum of 0x7FFFFFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#670)
:param offset: offset into the string data section of the beginning of
the string
:param sizeof_char: number of bytes per char (1 = 8bit, 2 = 16bit)
:returns: tuple of (length, read bytes)
"""
sizeof_2chars = sizeof_char << 1
fmt = "<2{}".format('B' if sizeof_char == 1 else 'H')
highbit = 0x80 << (8 * (sizeof_char - 1))
length1, length2 = unpack(fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
if (length1 & highbit) != 0:
length = ((length1 & ~highbit) << (8 * sizeof_char)) | length2
size = sizeof_2chars
else:
length = length1
size = sizeof_char
if sizeof_char == 1:
assert length <= 0x7FFF, "length of UTF-8 string is too large! At offset={}".format(offset)
else:
assert length <= 0x7FFFFFFF, "length of UTF-16 string is too large! At offset={}".format(offset)
return length, size
def show(self):
"""
Print some information on stdout about the string table
"""
print("StringBlock(stringsCount=0x%x, "
"stringsOffset=0x%x, "
"stylesCount=0x%x, "
"stylesOffset=0x%x, "
"flags=0x%x"
")" % (self.stringCount,
self.stringsOffset,
self.styleCount,
self.stylesOffset,
self.flags))
if self.stringCount > 0:
print()
print("String Table: ")
for i, s in enumerate(self):
print("{:08d} {}".format(i, repr(s)))
if self.styleCount > 0:
print()
print("Styles Table: ")
for i in range(self.styleCount):
print("{:08d} {}".format(i, repr(self.getStyle(i))))
|
appknox/pyaxmlparser | pyaxmlparser/stringblock.py | StringBlock._decode_length | python | def _decode_length(self, offset, sizeof_char):
sizeof_2chars = sizeof_char << 1
fmt = "<2{}".format('B' if sizeof_char == 1 else 'H')
highbit = 0x80 << (8 * (sizeof_char - 1))
length1, length2 = unpack(fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
if (length1 & highbit) != 0:
length = ((length1 & ~highbit) << (8 * sizeof_char)) | length2
size = sizeof_2chars
else:
length = length1
size = sizeof_char
if sizeof_char == 1:
assert length <= 0x7FFF, "length of UTF-8 string is too large! At offset={}".format(offset)
else:
assert length <= 0x7FFFFFFF, "length of UTF-16 string is too large! At offset={}".format(offset)
return length, size | Generic Length Decoding at offset of string
The method works for both 8 and 16 bit Strings.
Length checks are enforced:
* 8 bit strings: maximum of 0x7FFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#692)
* 16 bit strings: maximum of 0x7FFFFFFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#670)
:param offset: offset into the string data section of the beginning of
the string
:param sizeof_char: number of bytes per char (1 = 8bit, 2 = 16bit)
:returns: tuple of (length, read bytes) | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/stringblock.py#L211-L245 | null | class StringBlock(object):
"""
StringBlock is a CHUNK inside an AXML File
It contains all strings, which are used by referecing to ID's
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#436
"""
def __init__(self, buff, header):
"""
:param buff: buffer which holds the string block
:param header: a instance of :class:`~ARSCHeader`
"""
self._cache = {}
self.header = header
# We already read the header (which was chunk_type and chunk_size
# Now, we read the string_count:
self.stringCount = unpack('<I', buff.read(4))[0]
# style_count
self.styleCount = unpack('<I', buff.read(4))[0]
# flags
self.flags = unpack('<I', buff.read(4))[0]
self.m_isUTF8 = ((self.flags & const.UTF8_FLAG) != 0)
# string_pool_offset
# The string offset is counted from the beginning of the string section
self.stringsOffset = unpack('<I', buff.read(4))[0]
# style_pool_offset
# The styles offset is counted as well from the beginning of the string section
self.stylesOffset = unpack('<I', buff.read(4))[0]
# Check if they supplied a stylesOffset even if the count is 0:
if self.styleCount == 0 and self.stylesOffset > 0:
log.info("Styles Offset given, but styleCount is zero. "
"This is not a problem but could indicate packers.")
self.m_stringOffsets = []
self.m_styleOffsets = []
self.m_charbuff = ""
self.m_styles = []
# Next, there is a list of string following.
# This is only a list of offsets (4 byte each)
for i in range(self.stringCount):
self.m_stringOffsets.append(unpack('<I', buff.read(4))[0])
# And a list of styles
# again, a list of offsets
for i in range(self.styleCount):
self.m_styleOffsets.append(unpack('<I', buff.read(4))[0])
# FIXME it is probably better to parse n strings and not calculate the size
size = self.header.size - self.stringsOffset
# if there are styles as well, we do not want to read them too.
# Only read them, if no
if self.stylesOffset != 0 and self.styleCount != 0:
size = self.stylesOffset - self.stringsOffset
if (size % 4) != 0:
log.warning("Size of strings is not aligned by four bytes.")
self.m_charbuff = buff.read(size)
if self.stylesOffset != 0 and self.styleCount != 0:
size = self.header.size - self.stylesOffset
if (size % 4) != 0:
log.warning("Size of styles is not aligned by four bytes.")
for i in range(0, size // 4):
self.m_styles.append(unpack('<I', buff.read(4))[0])
def __getitem__(self, idx):
"""
Returns the string at the index in the string table
"""
return self.getString(idx)
def __len__(self):
"""
Get the number of strings stored in this table
"""
return self.stringCount
def __iter__(self):
"""
Iterable over all strings
"""
for i in range(self.stringCount):
yield self.getString(i)
def getString(self, idx):
    """
    Return the string at the index in the string table

    Strings are decoded lazily and memoized in ``self._cache``.
    Out-of-range indices return the empty string instead of raising.

    :param idx: index in the string table
    :return: str
    """
    if idx in self._cache:
        return self._cache[idx]

    # Valid indices are 0 <= idx < stringCount.  The previous bound
    # (idx > self.stringCount) let idx == stringCount slip through and
    # raise an IndexError on the offset lookup below.
    if idx < 0 or not self.m_stringOffsets or idx >= self.stringCount:
        return ""

    offset = self.m_stringOffsets[idx]

    if self.m_isUTF8:
        self._cache[idx] = self._decode8(offset)
    else:
        self._cache[idx] = self._decode16(offset)

    return self._cache[idx]
def getStyle(self, idx):
"""
Return the style associated with the index
:param idx: index of the style
:return:
"""
return self.m_styles[idx]
def _decode8(self, offset):
"""
Decode an UTF-8 String at the given offset
:param offset: offset of the string inside the data
:return: str
"""
# UTF-8 Strings contain two lengths, as they might differ:
# 1) the UTF-16 length
str_len, skip = self._decode_length(offset, 1)
offset += skip
# 2) the utf-8 string length
encoded_bytes, skip = self._decode_length(offset, 1)
offset += skip
data = self.m_charbuff[offset: offset + encoded_bytes]
assert self.m_charbuff[offset + encoded_bytes] == 0, \
"UTF-8 String is not null terminated! At offset={}".format(offset)
return self._decode_bytes(data, 'utf-8', str_len)
def _decode16(self, offset):
    """
    Decode an UTF-16 String at the given offset

    :param offset: offset of the string inside the data
    :return: str
    """
    str_len, skip = self._decode_length(offset, 2)
    offset += skip

    # The len is the string len in utf-16 units
    encoded_bytes = str_len * 2

    data = self.m_charbuff[offset: offset + encoded_bytes]

    # A well-formed entry is followed by a two-byte NULL terminator.
    assert self.m_charbuff[offset + encoded_bytes:offset + encoded_bytes + 2] == b"\x00\x00", \
        "UTF-16 String is not null terminated! At offset={}".format(offset)

    return self._decode_bytes(data, 'utf-16', str_len)
@staticmethod
def _decode_bytes(data, encoding, str_len):
"""
Generic decoding with length check.
The string is decoded from bytes with the given encoding, then the length
of the string is checked.
The string is decoded using the "replace" method.
:param data: bytes
:param encoding: encoding name ("utf-8" or "utf-16")
:param str_len: length of the decoded string
:return: str
"""
string = data.decode(encoding, 'replace')
if len(string) != str_len:
log.warning("invalid decoded string length")
return string
def show(self):
"""
Print some information on stdout about the string table
"""
print("StringBlock(stringsCount=0x%x, "
"stringsOffset=0x%x, "
"stylesCount=0x%x, "
"stylesOffset=0x%x, "
"flags=0x%x"
")" % (self.stringCount,
self.stringsOffset,
self.styleCount,
self.stylesOffset,
self.flags))
if self.stringCount > 0:
print()
print("String Table: ")
for i, s in enumerate(self):
print("{:08d} {}".format(i, repr(s)))
if self.styleCount > 0:
print()
print("Styles Table: ")
for i in range(self.styleCount):
print("{:08d} {}".format(i, repr(self.getStyle(i))))
|
appknox/pyaxmlparser | pyaxmlparser/axmlprinter.py | AXMLPrinter._fix_name | python | def _fix_name(self, name):
if not name[0].isalpha() and name[0] != "_":
log.warning("Invalid start for name '{}'".format(name))
self.packerwarning = True
name = "_{}".format(name)
if name.startswith("android:"):
# Seems be a common thing...
# Actually this means that the Manifest is likely to be broken, as
# usually no namespace URI is set in this case.
log.warning(
"Name '{}' starts with 'android:' prefix! "
"The Manifest seems to be broken? Removing prefix.".format(
name
)
)
self.packerwarning = True
name = name[len("android:"):]
if ":" in name:
# Print out an extra warning
log.warning("Name seems to contain a namespace prefix: '{}'".format(name))
if not re.match(r"^[a-zA-Z0-9._-]*$", name):
log.warning("Name '{}' contains invalid characters!".format(name))
self.packerwarning = True
name = re.sub(r"[^a-zA-Z0-9._-]", "_", name)
return name | Apply some fixes to element named and attribute names.
Try to get conform to:
> Like element names, attribute names are case-sensitive and must start with a letter or underscore.
> The rest of the name can contain letters, digits, hyphens, underscores, and periods.
See: https://msdn.microsoft.com/en-us/library/ms256152(v=vs.110).aspx
:param name: Name of the attribute
:return: a fixed version of the name | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlprinter.py#L169-L204 | null | class AXMLPrinter:
"""
Converter for AXML Files into a lxml ElementTree, which can easily be
converted into XML.
A Reference Implementation can be found at http://androidxref.com/9.0.0_r3/
xref/frameworks/base/tools/aapt/XMLNode.cpp
"""
__charrange = None
__replacement = None
def __init__(self, raw_buff):
    """
    Parse *raw_buff* with :class:`AXMLParser` and build the lxml tree.

    :param raw_buff: raw bytes of an AXML (binary XML) file
    """
    self.axml = AXMLParser(raw_buff)

    self.root = None
    self.packerwarning = False
    # Stack of currently open elements; cur[-1] is the innermost one.
    cur = []

    while self.axml.is_valid():
        _type = next(self.axml)

        if _type == const.START_TAG:
            name = self._fix_name(self.axml.name)
            uri = self._print_namespace(self.axml.namespace)
            tag = "{}{}".format(uri, name)

            comment = self.axml.comment
            if comment:
                if self.root is None:
                    log.warning("Can not attach comment with content '{}' without root!".format(comment))
                else:
                    cur[-1].append(etree.Comment(comment))

            log.debug("START_TAG: {} (line={})".format(tag, self.axml.m_lineNumber))
            elem = etree.Element(tag, nsmap=self.axml.nsmap)

            for i in range(self.axml.getAttributeCount()):
                uri = self._print_namespace(self.axml.getAttributeNamespace(i))
                name = self._fix_name(self.axml.getAttributeName(i))
                value = self._fix_value(self._get_attribute_value(i))

                log.debug("found an attribute: {}{}='{}'".format(uri, name, value.encode("utf-8")))
                if "{}{}".format(uri, name) in elem.attrib:
                    log.warning("Duplicate attribute '{}{}'! Will overwrite!".format(uri, name))
                elem.set("{}{}".format(uri, name), value)

            if self.root is None:
                self.root = elem
            else:
                if not cur:
                    # looks like we lost the root?
                    log.error("No more elements available to attach to! Is the XML malformed?")
                    break
                cur[-1].append(elem)
            cur.append(elem)

        if _type == const.END_TAG:
            if not cur:
                log.warning("Too many END_TAG! No more elements available to attach to!")

            # NOTE(review): execution falls through even when cur is
            # empty, so cur[-1] below would raise IndexError — confirm
            # whether this should `continue`/`break` instead.
            name = self.axml.name
            uri = self._print_namespace(self.axml.namespace)
            tag = "{}{}".format(uri, name)
            if cur[-1].tag != tag:
                log.warning(
                    "Closing tag '{}' does not match current stack! "
                    "At line number: {}. Is the XML malformed?".format(
                        self.axml.name, self.axml.m_lineNumber
                    )
                )
            cur.pop()
        if _type == const.TEXT:
            log.debug("TEXT for {}".format(cur[-1]))
            cur[-1].text = self.axml.text
        if _type == const.END_DOCUMENT:
            # Check if all namespace mappings are closed
            if len(self.axml.namespaces) > 0:
                log.warning(
                    "Not all namespace mappings were closed! Malformed AXML?")
            break
def get_buff(self):
    """
    Returns the raw XML file without prettification applied.

    Convenience wrapper around :meth:`get_xml` with ``pretty=False``.

    :returns: bytes, encoded as UTF-8
    """
    return self.get_xml(pretty=False)
def get_xml(self, pretty=True):
    """
    Get the XML as an UTF-8 string

    :param pretty: pretty-print the serialized output (default True)
    :returns: bytes encoded as UTF-8
    """
    # NOTE(review): if parsing failed, self.root is still None and
    # etree.tostring will raise — callers should check is_valid() first.
    return etree.tostring(self.root, encoding="utf-8", pretty_print=pretty)
def get_xml_obj(self):
    """
    Get the XML as an ElementTree object

    May be None if no START_TAG was ever parsed (malformed input).

    :returns: :class:`lxml.etree.Element`
    """
    return self.root
def is_valid(self):
    """
    Return the state of the AXMLParser.
    If this flag is set to False, the parsing has failed, thus
    the resulting XML will not work or will even be empty.
    """
    # Delegates to the underlying AXMLParser state.
    return self.axml.is_valid()
def is_packed(self):
    """
    Returns True if the AXML is likely to be packed

    Packers do some weird stuff and we try to detect it.
    Sometimes the files are not packed but simply broken or compiled with
    some broken version of a tool.
    Some file corruption might also be appear to be a packed file.

    :returns: True if packer detected, False otherwise
    """
    # The flag is accumulated by _fix_name()/_fix_value() during parsing.
    return self.packerwarning
def _get_attribute_value(self, index):
    """
    Wrapper function for format_value
    to resolve the actual value of an attribute in a tag

    :param index: index of the current attribute
    :return: formatted value
    """
    _type = self.axml.getAttributeValueType(index)
    _data = self.axml.getAttributeValueData(index)
    # format_value (defined elsewhere in this module) renders _data
    # according to _type; the lambda lets it fetch the raw string value
    # lazily when needed.
    return format_value(_type, _data, lambda _: self.axml.getAttributeValue(index))
def _fix_value(self, value):
    """
    Return a cleaned version of a value
    according to the specification:
    > Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]

    See https://www.w3.org/TR/xml/#charsets

    :param value: a value to clean
    :return: the cleaned value
    """
    # The validation/replacement patterns are compiled once on first use
    # and cached (the class-level __charrange/__replacement default to None).
    if not self.__charrange or not self.__replacement:
        if sys.maxunicode == 0xFFFF:
            # Fix for python 2.x, surrogate pairs does not match in regex
            self.__charrange = re.compile(
                u'^([\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD]|[\uD800-\uDBFF][\uDC00-\uDFFF])*$')
            # TODO: this regex is slightly wrong... surrogates are not matched as pairs.
            self.__replacement = re.compile(u'[^\u0020-\uDBFF\u0009\u000A\u000D\uE000-\uFFFD\uDC00-\uDFFF]')
        else:
            self.__charrange = re.compile(u'^[\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]*$')
            self.__replacement = re.compile(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]')

    # Reading string until \x00. This is the same as aapt does.
    if "\x00" in value:
        self.packerwarning = True
        log.warning(
            "Null byte found in attribute value at position {}: "
            "Value(hex): '{}'".format(
                value.find("\x00"),
                binascii.hexlify(value.encode("utf-8"))
            )
        )
        value = value[:value.find("\x00")]

    if not self.__charrange.match(value):
        log.warning("Invalid character in value found. Replacing with '_'.")
        self.packerwarning = True
        value = self.__replacement.sub('_', value)
    return value
def _print_namespace(self, uri):
if uri != "":
uri = "{{{}}}".format(uri)
return uri
|
appknox/pyaxmlparser | pyaxmlparser/axmlprinter.py | AXMLPrinter._fix_value | python | def _fix_value(self, value):
if not self.__charrange or not self.__replacement:
if sys.maxunicode == 0xFFFF:
# Fix for python 2.x, surrogate pairs does not match in regex
self.__charrange = re.compile(
u'^([\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD]|[\uD800-\uDBFF][\uDC00-\uDFFF])*$')
# TODO: this regex is slightly wrong... surrogates are not matched as pairs.
self.__replacement = re.compile(u'[^\u0020-\uDBFF\u0009\u000A\u000D\uE000-\uFFFD\uDC00-\uDFFF]')
else:
self.__charrange = re.compile(u'^[\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]*$')
self.__replacement = re.compile(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]')
# Reading string until \x00. This is the same as aapt does.
if "\x00" in value:
self.packerwarning = True
log.warning(
"Null byte found in attribute value at position {}: "
"Value(hex): '{}'".format(
value.find("\x00"),
binascii.hexlify(value.encode("utf-8"))
)
)
value = value[:value.find("\x00")]
if not self.__charrange.match(value):
log.warning("Invalid character in value found. Replacing with '_'.")
self.packerwarning = True
value = self.__replacement.sub('_', value)
return value | Return a cleaned version of a value
according to the specification:
> Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
See https://www.w3.org/TR/xml/#charsets
:param value: a value to clean
:return: the cleaned value | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlprinter.py#L206-L244 | null | class AXMLPrinter:
"""
Converter for AXML Files into a lxml ElementTree, which can easily be
converted into XML.
A Reference Implementation can be found at http://androidxref.com/9.0.0_r3/
xref/frameworks/base/tools/aapt/XMLNode.cpp
"""
__charrange = None
__replacement = None
def __init__(self, raw_buff):
self.axml = AXMLParser(raw_buff)
self.root = None
self.packerwarning = False
cur = []
while self.axml.is_valid():
_type = next(self.axml)
if _type == const.START_TAG:
name = self._fix_name(self.axml.name)
uri = self._print_namespace(self.axml.namespace)
tag = "{}{}".format(uri, name)
comment = self.axml.comment
if comment:
if self.root is None:
log.warning("Can not attach comment with content '{}' without root!".format(comment))
else:
cur[-1].append(etree.Comment(comment))
log.debug("START_TAG: {} (line={})".format(tag, self.axml.m_lineNumber))
elem = etree.Element(tag, nsmap=self.axml.nsmap)
for i in range(self.axml.getAttributeCount()):
uri = self._print_namespace(self.axml.getAttributeNamespace(i))
name = self._fix_name(self.axml.getAttributeName(i))
value = self._fix_value(self._get_attribute_value(i))
log.debug("found an attribute: {}{}='{}'".format(uri, name, value.encode("utf-8")))
if "{}{}".format(uri, name) in elem.attrib:
log.warning("Duplicate attribute '{}{}'! Will overwrite!".format(uri, name))
elem.set("{}{}".format(uri, name), value)
if self.root is None:
self.root = elem
else:
if not cur:
# looks like we lost the root?
log.error("No more elements available to attach to! Is the XML malformed?")
break
cur[-1].append(elem)
cur.append(elem)
if _type == const.END_TAG:
if not cur:
log.warning("Too many END_TAG! No more elements available to attach to!")
name = self.axml.name
uri = self._print_namespace(self.axml.namespace)
tag = "{}{}".format(uri, name)
if cur[-1].tag != tag:
log.warning(
"Closing tag '{}' does not match current stack! "
"At line number: {}. Is the XML malformed?".format(
self.axml.name, self.axml.m_lineNumber
)
)
cur.pop()
if _type == const.TEXT:
log.debug("TEXT for {}".format(cur[-1]))
cur[-1].text = self.axml.text
if _type == const.END_DOCUMENT:
# Check if all namespace mappings are closed
if len(self.axml.namespaces) > 0:
log.warning(
"Not all namespace mappings were closed! Malformed AXML?")
break
def get_buff(self):
"""
Returns the raw XML file without prettification applied.
:returns: bytes, encoded as UTF-8
"""
return self.get_xml(pretty=False)
def get_xml(self, pretty=True):
"""
Get the XML as an UTF-8 string
:returns: bytes encoded as UTF-8
"""
return etree.tostring(self.root, encoding="utf-8", pretty_print=pretty)
def get_xml_obj(self):
"""
Get the XML as an ElementTree object
:returns: :class:`lxml.etree.Element`
"""
return self.root
def is_valid(self):
"""
Return the state of the AXMLParser.
If this flag is set to False, the parsing has failed, thus
the resulting XML will not work or will even be empty.
"""
return self.axml.is_valid()
def is_packed(self):
"""
Returns True if the AXML is likely to be packed
Packers do some weird stuff and we try to detect it.
Sometimes the files are not packed but simply broken or compiled with
some broken version of a tool.
Some file corruption might also be appear to be a packed file.
:returns: True if packer detected, False otherwise
"""
return self.packerwarning
def _get_attribute_value(self, index):
"""
Wrapper function for format_value
to resolve the actual value of an attribute in a tag
:param index: index of the current attribute
:return: formatted value
"""
_type = self.axml.getAttributeValueType(index)
_data = self.axml.getAttributeValueData(index)
return format_value(_type, _data, lambda _: self.axml.getAttributeValue(index))
def _fix_name(self, name):
"""
Apply some fixes to element named and attribute names.
Try to get conform to:
> Like element names, attribute names are case-sensitive and must start with a letter or underscore.
> The rest of the name can contain letters, digits, hyphens, underscores, and periods.
See: https://msdn.microsoft.com/en-us/library/ms256152(v=vs.110).aspx
:param name: Name of the attribute
:return: a fixed version of the name
"""
if not name[0].isalpha() and name[0] != "_":
log.warning("Invalid start for name '{}'".format(name))
self.packerwarning = True
name = "_{}".format(name)
if name.startswith("android:"):
# Seems be a common thing...
# Actually this means that the Manifest is likely to be broken, as
# usually no namespace URI is set in this case.
log.warning(
"Name '{}' starts with 'android:' prefix! "
"The Manifest seems to be broken? Removing prefix.".format(
name
)
)
self.packerwarning = True
name = name[len("android:"):]
if ":" in name:
# Print out an extra warning
log.warning("Name seems to contain a namespace prefix: '{}'".format(name))
if not re.match(r"^[a-zA-Z0-9._-]*$", name):
log.warning("Name '{}' contains invalid characters!".format(name))
self.packerwarning = True
name = re.sub(r"[^a-zA-Z0-9._-]", "_", name)
return name
def _print_namespace(self, uri):
if uri != "":
uri = "{{{}}}".format(uri)
return uri
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK._apk_analysis | python | def _apk_analysis(self):
i = "AndroidManifest.xml"
try:
manifest_data = self.zip.read(i)
except KeyError:
log.warning("Missing AndroidManifest.xml. Is this an APK file?")
else:
ap = AXMLPrinter(manifest_data)
if not ap.is_valid():
log.error("Error while parsing AndroidManifest.xml - is the file valid?")
return
self.axml[i] = ap
self.xml[i] = self.axml[i].get_xml_obj()
if self.axml[i].is_packed():
log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")
if self.xml[i] is not None:
if self.xml[i].tag != "manifest":
log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
return
self.package = self.get_attribute_value("manifest", "package")
self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
permission = list(self.get_all_attribute_value("uses-permission", "name"))
self.permissions = list(set(self.permissions + permission))
for uses_permission in self.find_tags("uses-permission"):
self.uses_permissions.append([
self.get_value_from_tag(uses_permission, "name"),
self._get_permission_maxsdk(uses_permission)
])
# getting details of the declared permissions
for d_perm_item in self.find_tags('permission'):
d_perm_name = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "name")))
d_perm_label = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "label")))
d_perm_description = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "description")))
d_perm_permissionGroup = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
d_perm_protectionLevel = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "protectionLevel")))
d_perm_details = {
"label": d_perm_label,
"description": d_perm_description,
"permissionGroup": d_perm_permissionGroup,
"protectionLevel": d_perm_protectionLevel,
}
self.declared_permissions[d_perm_name] = d_perm_details
self.valid_apk = True | Run analysis on the APK file.
This method is usually called by __init__ except if skip_analysis is False.
It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
extracted from the Manifest. | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L129-L193 | [
"def is_valid(self):\n \"\"\"\n Return the state of the AXMLParser.\n If this flag is set to False, the parsing has failed, thus\n the resulting XML will not work or will even be empty.\n \"\"\"\n return self.axml.is_valid()\n",
"def _get_res_string_value(self, string):\n if not string.starts... | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
    """
    This class can access to all elements in an APK file

    example::

        APK("myfile.apk")
        APK(read("myfile.apk"), raw=True)

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param magic_file: specify the magic file (not used anymore - legacy only)
    :param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
    :param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
        Throw an exception on failure (default False)

    :type filename: string
    :type raw: boolean
    :type magic_file: string
    :type skip_analysis: boolean
    :type testzip: boolean
    """
    if magic_file:
        log.warning("You set magic_file but this parameter is actually unused. You should remove it.")

    self.filename = filename

    # Parsed AndroidManifest / resources, filled by _apk_analysis().
    self.xml = {}
    self.axml = {}
    self.arsc = {}

    self.package = ""
    self.androidversion = {}
    self.permissions = []
    self.uses_permissions = []
    self.declared_permissions = {}
    self.valid_apk = False

    self._files = {}
    self.files_crc32 = {}

    # self.__raw is name-mangled to _APK__raw; __setstate__ relies on it
    # to rebuild the ZipFile after unpickling.
    if raw is True:
        self.__raw = bytearray(filename)
        self._sha256 = hashlib.sha256(self.__raw).hexdigest()
        # Set the filename to something sane
        self.filename = "raw_apk_sha256:{}".format(self._sha256)
    else:
        self.__raw = bytearray(read(filename))

    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")

    if testzip:
        # Test the zipfile for integrity before continuing.
        # This process might be slow, as the whole file is read.
        # Therefore it is possible to enable it as a separate feature.
        #
        # A short benchmark showed, that testing the zip takes about 10 times longer!
        # e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
        # testzip takes 0.1s!
        ret = self.zip.testzip()
        if ret is not None:
            # we could print the filename here, but there are zip which are so broken
            # That the filename is either very very long or does not make any sense.
            # Thus we do not do it, the user might find out by using other tools.
            raise BrokenAPKError("The APK is probably broken: testzip returned an error.")

    if not skip_analysis:
        self._apk_analysis()
def _ns(self, name):
"""
return the name including the Android namespace
"""
return NS_ANDROID + name
def __getstate__(self):
"""
Function for pickling APK Objects.
We remove the zip from the Object, as it is not pickable
And it does not make any sense to pickle it anyways.
:return: the picklable APK Object without zip.
"""
# Upon pickling, we need to remove the ZipFile
x = self.__dict__
x['axml'] = str(x['axml'])
x['xml'] = str(x['xml'])
del x['zip']
return x
def __setstate__(self, state):
"""
Load a pickled APK Object and restore the state
We load the zip file back by reading __raw from the Object.
:param state: pickled state
"""
self.__dict__ = state
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
if not string.startswith('@string/'):
return string
string_key = string[9:]
res_parser = self.get_android_resources()
if not res_parser:
return ''
string_value = ''
for package_name in res_parser.get_packages_names():
extracted_values = res_parser.get_string(package_name, string_key)
if extracted_values:
string_value = extracted_values[1]
break
return string_value
def _get_permission_maxsdk(self, item):
maxSdkVersion = None
try:
maxSdkVersion = int(self.get_value_from_tag(item, "maxSdkVersion"))
except ValueError:
log.warning(self.get_max_sdk_version() + 'is not a valid value for <uses-permission> maxSdkVersion')
except TypeError:
pass
return maxSdkVersion
    def is_valid_APK(self):
        """
        Return true if the APK is valid, false otherwise.

        An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
        This does not mean that the APK has a valid signature nor that the APK
        can be installed on an Android system.

        The flag is set during :meth:`_apk_analysis`; if analysis was skipped,
        this is always False.

        :rtype: boolean
        """
        return self.valid_apk
    def get_filename(self):
        """
        Return the filename of the APK.

        For raw input this is a synthetic ``raw_apk_sha256:<digest>`` name
        assigned in ``__init__``.

        :rtype: :class:`str`
        """
        return self.filename
    def get_app_name(self):
        """
        Return the appname of the APK

        This name is read from the AndroidManifest.xml
        using the application android:label.
        If no label exists, the android:label of the main activity is used.

        If there is also no main activity label, an empty string is returned.

        :rtype: :class:`str`
        """
        app_name = self.get_attribute_value('application', 'label')
        if app_name is None:
            # Fall back to the label of one of the main activities.
            activities = self.get_main_activities()
            main_activity_name = None
            if len(activities) > 0:
                main_activity_name = activities.pop()
            app_name = self.get_attribute_value(
                'activity', 'label', name=main_activity_name
            )
        if app_name is None:
            # No App name set
            # TODO return packagename instead?
            log.warning("It looks like that no app name is set for the main activity!")
            return ""
        if app_name.startswith("@"):
            # The label is a resource reference; resolve it via resources.arsc.
            res_parser = self.get_android_resources()
            if not res_parser:
                # TODO: What should be the correct return value here?
                return app_name
            res_id, package = res_parser.parse_id(app_name)
            # If the package name is the same as the APK package,
            # we should be able to resolve the ID.
            if package and package != self.get_package():
                if package == 'android':
                    # TODO: we can not resolve this, as we lack framework-res.apk
                    # one exception would be when parsing framework-res.apk directly.
                    log.warning("Resource ID with android package name encountered! "
                                "Will not resolve, framework-res.apk would be required.")
                    return app_name
                else:
                    # TODO should look this up, might be in the resources
                    log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                    return app_name
            try:
                # Pick the string value for the default resource configuration.
                app_name = res_parser.get_resolved_res_configs(
                    res_id,
                    ARSCResTableConfig.default_config())[0][1]
            except Exception as e:
                log.warning("Exception selecting app name: %s" % e)
        return app_name
    def get_app_icon(self, max_dpi=65536):
        """
        Return the first icon file name, which density is not greater than max_dpi,
        unless exact icon resolution is set in the manifest, in which case
        return the exact file.

        This information is read from the AndroidManifest.xml

        From https://developer.android.com/guide/practices/screens_support.html
        and https://developer.android.com/ndk/reference/group___configuration.html

        * DEFAULT 0dpi
        * ldpi (low) 120dpi
        * mdpi (medium) 160dpi
        * TV 213dpi
        * hdpi (high) 240dpi
        * xhdpi (extra-high) 320dpi
        * xxhdpi (extra-extra-high) 480dpi
        * xxxhdpi (extra-extra-extra-high) 640dpi
        * anydpi 65534dpi (0xFFFE)
        * nodpi 65535dpi (0xFFFF)

        There is a difference between nodpi and anydpi:
        nodpi will be used if no other density is specified. Or the density does not match.
        nodpi is the fallback for everything else. If there is a resource that matches the DPI,
        this is used.
        anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
        Therefore anydpi is usually used with vector graphics and with constraints on the API level.
        For example adaptive icons are usually marked as anydpi.

        When it comes now to selecting an icon, there is the following flow:

        1) is there an anydpi icon?
        2) is there an icon for the dpi of the device?
        3) is there a nodpi icon?
        4) (only on very old devices) is there a icon with dpi 0 (the default)

        For more information read here: https://stackoverflow.com/a/34370735/446140

        :param max_dpi: upper bound on the density of the returned icon
        :return: the icon file name inside the APK, or None if unresolved
        :rtype: :class:`str`
        """
        main_activity_name = self.get_main_activity()

        # Prefer the icon declared on the main activity, then the application.
        app_icon = self.get_attribute_value(
            'activity', 'icon', name=main_activity_name)

        if not app_icon:
            app_icon = self.get_attribute_value('application', 'icon')

        res_parser = self.get_android_resources()
        if not res_parser:
            # Can not do anything below this point to resolve...
            return None

        # Fall back to the conventional launcher resource names.
        if not app_icon:
            res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
            if res_id:
                app_icon = "@%x" % res_id

        if not app_icon:
            res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
            if res_id:
                app_icon = "@%x" % res_id

        if not app_icon:
            # If the icon can not be found, return now
            return None

        if app_icon.startswith("@"):
            # Resource reference: pick the candidate with the highest density
            # that does not exceed max_dpi.
            res_id = int(app_icon[1:], 16)
            candidates = res_parser.get_resolved_res_configs(res_id)

            app_icon = None
            current_dpi = -1

            try:
                for config, file_name in candidates:
                    dpi = config.get_density()
                    if current_dpi < dpi <= max_dpi:
                        app_icon = file_name
                        current_dpi = dpi
            except Exception as e:
                log.warning("Exception selecting app icon: %s" % e)

        return app_icon
    def get_package(self):
        """
        Return the name of the package, as read from the manifest's
        ``package`` attribute during analysis.

        :rtype: :class:`str`
        """
        return self.package
    def get_androidversion_code(self):
        """
        Return the android version code (manifest ``android:versionCode``).

        :raises KeyError: if analysis was skipped and the value was never read
        :rtype: :class:`str`
        """
        return self.androidversion["Code"]
    def get_androidversion_name(self):
        """
        Return the android version name (manifest ``android:versionName``).

        :raises KeyError: if analysis was skipped and the value was never read
        :rtype: :class:`str`
        """
        return self.androidversion["Name"]
def get_files(self):
"""
Return the file names inside the APK.
:rtype: a list of :class:`str`
"""
return self.zip.namelist()
    def _get_file_magic_name(self, buffer):
        """
        Return the filetype guessed for a buffer

        Gracefully degrades to "Unknown" when python-magic is not installed,
        or when a different package named ``magic`` is installed.

        :param buffer: bytes
        :return: str of filetype
        """
        default = "Unknown"
        ftype = None

        try:
            # Magic is optional
            import magic
        except ImportError:
            return default

        try:
            # There are several implementations of magic,
            # unfortunately all called magic
            # We use this one: https://github.com/ahupp/python-magic/
            getattr(magic, "MagicException")
        except AttributeError:
            # Looks like no magic was installed
            return default

        try:
            # Only the first 1 KiB is needed for type detection.
            ftype = magic.from_buffer(buffer[:1024])
        except magic.MagicError as e:
            log.exception("Error getting the magic type!")
            return default

        if not ftype:
            return default
        else:
            # Post-process known misdetections (e.g. APKs reported as Zip).
            return self._patch_magic(buffer, ftype)
    @property
    def files(self):
        """
        Returns a dictionary of filenames and detected magic type

        Convenience alias for :meth:`get_files_types`.

        :return: dictionary of files and their mime type
        """
        return self.get_files_types()
    def get_files_types(self):
        """
        Return the files inside the APK with their associated types (by using python-magic)

        The result is computed once and cached in ``self._files``; the CRC32
        cache is filled as a side effect.

        :rtype: a dictionnary
        """
        if self._files == {}:
            # Generate File Types / CRC List
            for i in self.get_files():
                buffer = self.zip.read(i)
                self.files_crc32[i] = crc32(buffer)
                # FIXME why not use the crc from the zipfile?
                # should be validated as well.
                # crc = self.zip.getinfo(i).CRC
                self._files[i] = self._get_file_magic_name(buffer)

        return self._files
def _patch_magic(self, buffer, orig):
"""
Overwrite some probably wrong detections by mime libraries
:param buffer: bytes of the file to detect
:param orig: guess by mime libary
:return: corrected guess
"""
if ("Zip" in orig) or ('(JAR)' in orig):
val = is_android_raw(buffer)
if val == "APK":
return "Android application package file"
return orig
def get_files_crc32(self):
"""
Calculates and returns a dictionary of filenames and CRC32
:return: dict of filename: CRC32
"""
if self.files_crc32 == {}:
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
return self.files_crc32
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: str, str, int
"""
for k in self.get_files():
yield k, self.get_files_types()[k], self.get_files_crc32()[k]
    def get_raw(self):
        """
        Return raw bytes of the APK, as read (or passed) at construction time.

        :rtype: bytes
        """
        return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
inside the APK
:rtype: bytes
"""
try:
return self.zip.read(filename)
except KeyError:
raise FileNotPresent(filename)
def get_dex(self):
"""
Return the raw data of the classes dex file
This will give you the data of the file called `classes.dex`
inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.
:rtype: bytes
"""
try:
return self.get_file("classes.dex")
except FileNotPresent:
return ""
def get_dex_names(self):
"""
Return the names of all DEX files found in the APK.
This method only accounts for "offical" dex files, i.e. all files
in the root directory of the APK named classes.dex or classes[0-9]+.dex
:rtype: a list of str
"""
dexre = re.compile("classes(\d*).dex")
return filter(lambda x: dexre.match(x), self.get_files())
def get_all_dex(self):
"""
Return the raw data of all classes dex files
:rtype: a generator of bytes
"""
for dex_name in self.get_dex_names():
yield self.get_file(dex_name)
def is_multidex(self):
"""
Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False
"""
dexre = re.compile("^classes(\d+)?.dex$")
return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1
@DeprecationWarning
def get_elements(self, tag_name, attribute, with_namespace=True):
"""
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value)
def _format_value(self, value):
"""
Format a value with packagename, if not already set
:param value:
:return:
"""
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
@DeprecationWarning
def get_element(self, tag_name, attribute, **attribute_filter):
"""
:Deprecated: use `get_attribute_value()` instead
Return element in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml:
if self.xml[i] is None:
continue
tag = self.xml[i].findall('.//' + tag_name)
if len(tag) == 0:
return None
for item in tag:
skip_this_item = False
for attr, val in list(attribute_filter.items()):
attr_val = item.get(self._ns(attr))
if attr_val != val:
skip_this_item = True
break
if skip_this_item:
continue
value = item.get(self._ns(attribute))
if value is not None:
return value
return None
    def get_all_attribute_value(
        self, tag_name, attribute, format_value=True, **attribute_filter
    ):
        """
        Return all the attribute values in xml files which match with the tag name and the specific attribute

        :param tag_name: specify the tag name
        :type tag_name: string
        :param attribute: specify the attribute
        :type attribute: string
        :param format_value: specify if the value needs to be formatted with packagename
        :type format_value: boolean
        :return: a generator of matching attribute values
        """
        tags = self.find_tags(tag_name, **attribute_filter)
        for tag in tags:
            # Try the plain attribute first, then the namespaced one.
            value = tag.get(attribute) or tag.get(self._ns(attribute))
            if value is not None:
                if format_value:
                    yield self._format_value(value)
                else:
                    yield value
def get_attribute_value(
self, tag_name, attribute, format_value=False, **attribute_filter
):
"""
Return the attribute value in xml files which matches the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:param format_value: specify if the value needs to be formatted with packagename
:type format_value: boolean
"""
for value in self.get_all_attribute_value(
tag_name, attribute, format_value, **attribute_filter):
if value is not None:
return value
    def get_value_from_tag(self, tag, attribute):
        """
        Return the value of the attribute in a specific tag

        Looks up the namespaced attribute first, then falls back to the
        plain attribute name.

        :param tag: specify the tag element
        :type tag: Element
        :param attribute: specify the attribute
        :type attribute: string
        :return: the attribute value, or None if absent in both forms
        """
        # TODO: figure out if both android:name and name tag exist which one to give preference
        value = tag.get(self._ns(attribute))
        if value is None:
            log.warning("Failed to get the attribute with namespace")
            value = tag.get(attribute)
        return value
def find_tags(self, tag_name, **attribute_filter):
"""
Return a list of all the matched tags in all available xml
:param tag: specify the tag name
:type tag: string
"""
all_tags = [
self.find_tags_from_xml(
i, tag_name, **attribute_filter
)
for i in self.xml
]
return [tag for tag_list in all_tags for tag in tag_list]
    def find_tags_from_xml(
        self, xml_name, tag_name, **attribute_filter
    ):
        """
        Return a list of all the matched tags in a specific xml

        :param xml_name: specify from which xml to pick the tag from
        :type xml_name: string
        :param tag_name: specify the tag name
        :type tag_name: string
        """
        xml = self.xml[xml_name]
        if xml is None:
            return []
        # The root element itself may be the tag we are looking for;
        # findall('.//') below only searches descendants.
        if xml.tag == tag_name:
            if self.is_tag_matched(
                xml.tag, **attribute_filter
            ):
                return [xml]
            return []
        tags = xml.findall(".//" + tag_name)
        return [
            tag for tag in tags if self.is_tag_matched(
                tag, **attribute_filter
            )
        ]
def is_tag_matched(self, tag, **attribute_filter):
"""
Return true if the attributes matches in attribute filter
:param tag: specify the tag element
:type tag: Element
:param attribute: specify the attribute
:type attribute: string
"""
if len(attribute_filter) <= 0:
return True
for attr, value in attribute_filter.items():
# TODO: figure out if both android:name and name tag exist which one to give preference
_value = tag.get(self._ns(attr))
if _value is None:
log.warning("Failed to get the attribute with namespace")
_value = tag.get(attr)
if _value != value:
return False
return True
    def get_main_activities(self):
        """
        Return names of the main activities

        These values are read from the AndroidManifest.xml

        A "main" activity is one that is both enabled, declares the
        android.intent.action.MAIN action and the
        android.intent.category.LAUNCHER category.

        :rtype: a set of str
        """
        x = set()  # activities declaring action MAIN
        y = set()  # activities declaring category LAUNCHER
        for i in self.xml:
            if self.xml[i] is None:
                continue
            # activity-alias entries count as launchable activities too.
            activities_and_aliases = self.xml[i].findall(".//activity") + \
                                     self.xml[i].findall(".//activity-alias")
            for item in activities_and_aliases:
                # Some applications have more than one MAIN activity.
                # For example: paid and free content
                activityEnabled = item.get(self._ns("enabled"))
                if activityEnabled == "false":
                    continue
                for sitem in item.findall(".//action"):
                    val = sitem.get(self._ns("name"))
                    if val == "android.intent.action.MAIN":
                        activity = item.get(self._ns("name"))
                        if activity is not None:
                            x.add(item.get(self._ns("name")))
                        else:
                            log.warning('Main activity without name')
                for sitem in item.findall(".//category"):
                    val = sitem.get(self._ns("name"))
                    if val == "android.intent.category.LAUNCHER":
                        activity = item.get(self._ns("name"))
                        if activity is not None:
                            y.add(item.get(self._ns("name")))
                        else:
                            log.warning('Launcher activity without name')
        return x.intersection(y)
def get_main_activity(self):
"""
Return the name of the main activity
This value is read from the AndroidManifest.xml
:rtype: str
"""
activities = self.get_main_activities()
if len(activities) > 0:
return self._format_value(activities.pop())
return None
def get_activities(self):
"""
Return the android:name attribute of all activities
:rtype: a list of str
"""
return list(self.get_all_attribute_value("activity", "name"))
def get_services(self):
"""
Return the android:name attribute of all services
:rtype: a list of str
"""
return list(self.get_all_attribute_value("service", "name"))
def get_receivers(self):
"""
Return the android:name attribute of all receivers
:rtype: a list of string
"""
return list(self.get_all_attribute_value("receiver", "name"))
def get_providers(self):
"""
Return the android:name attribute of all providers
:rtype: a list of string
"""
return list(self.get_all_attribute_value("provider", "name"))
    def get_intent_filters(self, itemtype, name):
        """
        Find intent filters for a given item and name.

        Intent filter are attached to activities, services or receivers.
        You can search for the intent filters of such items and get a dictionary of all
        attached actions and intent categories.

        :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
        :param name: the `android:name` of the parent item, e.g. activity name
        :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
                 (keys with no hits are removed; an empty dict means none found)
        """
        d = {"action": [], "category": []}

        for i in self.xml:
            # TODO: this can probably be solved using a single xpath
            for item in self.xml[i].findall(".//" + itemtype):
                if self._format_value(item.get(self._ns("name"))) == name:
                    for sitem in item.findall(".//intent-filter"):
                        for ssitem in sitem.findall("action"):
                            # De-duplicate while preserving order of appearance.
                            if ssitem.get(self._ns("name")) not in d["action"]:
                                d["action"].append(ssitem.get(self._ns("name")))
                        for ssitem in sitem.findall("category"):
                            if ssitem.get(self._ns("name")) not in d["category"]:
                                d["category"].append(ssitem.get(self._ns("name")))

        if not d["action"]:
            del d["action"]

        if not d["category"]:
            del d["category"]

        return d
    def get_permissions(self):
        """
        Return permissions names declared in the AndroidManifest.xml.

        It is possible that permissions are returned multiple times,
        as this function does not filter the permissions, i.e. it shows you
        exactly what was defined in the AndroidManifest.xml.

        Implied permissions, which are granted automatically, are not returned
        here. Use :meth:`get_uses_implied_permission_list` if you need a list
        of implied permissions.

        :returns: A list of permissions
        :rtype: list
        """
        return self.permissions
    def get_uses_implied_permission_list(self):
        """
        Return all permissions implied by the target SDK or other permissions.

        Each entry is a ``[permission_name, maxSdkVersion]`` pair, where the
        second element may be None.

        :rtype: list of string
        """
        target_sdk_version = self.get_effective_target_sdk_version()

        READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
        READ_CONTACTS = 'android.permission.READ_CONTACTS'
        READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
        READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
        WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
        WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
        WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'

        implied = []

        # Very old apps (targetSdk < 4) implicitly get storage and phone state.
        implied_WRITE_EXTERNAL_STORAGE = False
        if target_sdk_version < 4:
            if WRITE_EXTERNAL_STORAGE not in self.permissions:
                implied.append([WRITE_EXTERNAL_STORAGE, None])
                implied_WRITE_EXTERNAL_STORAGE = True
            if READ_PHONE_STATE not in self.permissions:
                implied.append([READ_PHONE_STATE, None])

        # WRITE_EXTERNAL_STORAGE implies READ_EXTERNAL_STORAGE, inheriting
        # the maxSdkVersion declared on the write permission (if any).
        if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
                and READ_EXTERNAL_STORAGE not in self.permissions:
            maxSdkVersion = None
            for name, version in self.uses_permissions:
                if name == WRITE_EXTERNAL_STORAGE:
                    maxSdkVersion = version
                    break
            implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])

        # Before SDK 16 the contact permissions implied the call-log ones.
        if target_sdk_version < 16:
            if READ_CONTACTS in self.permissions \
                    and READ_CALL_LOG not in self.permissions:
                implied.append([READ_CALL_LOG, None])
            if WRITE_CONTACTS in self.permissions \
                    and WRITE_CALL_LOG not in self.permissions:
                implied.append([WRITE_CALL_LOG, None])

        return implied
def get_details_permissions(self):
"""
Return permissions with details
:rtype: dict of {permission: [protectionLevel, label, description]}
"""
l = {}
for i in self.permissions:
if i in self.permission_module:
x = self.permission_module[i]
l[i] = [x["protectionLevel"], x["label"], x["description"]]
else:
# FIXME: the permission might be signature, if it is defined by the app itself!
l[i] = ["normal", "Unknown permission from android reference",
"Unknown permission from android reference"]
return l
@DeprecationWarning
def get_requested_permissions(self):
"""
Returns all requested permissions.
It has the same result as :meth:`get_permissions` and might be removed in the future
:rtype: list of str
"""
return self.get_permissions()
def get_requested_aosp_permissions(self):
"""
Returns requested permissions declared within AOSP project.
This includes several other permissions as well, which are in the platform apps.
:rtype: list of str
"""
aosp_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm in list(self.permission_module.keys()):
aosp_permissions.append(perm)
return aosp_permissions
def get_requested_aosp_permissions_details(self):
"""
Returns requested aosp permissions with details.
:rtype: dictionary
"""
l = {}
for i in self.permissions:
try:
l[i] = self.permission_module[i]
except KeyError:
# if we have not found permission do nothing
continue
return l
def get_requested_third_party_permissions(self):
"""
Returns list of requested permissions not declared within AOSP project.
:rtype: list of strings
"""
third_party_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm not in list(self.permission_module.keys()):
third_party_permissions.append(perm)
return third_party_permissions
def get_declared_permissions(self):
"""
Returns list of the declared permissions.
:rtype: list of strings
"""
return list(self.declared_permissions.keys())
    def get_declared_permissions_details(self):
        """
        Returns declared permissions with the details.

        Note: this returns the internal dict itself, not a copy.

        :rtype: dict
        """
        return self.declared_permissions
    def get_max_sdk_version(self):
        """
        Return the android:maxSdkVersion attribute, or None if not declared.

        :rtype: string
        """
        return self.get_attribute_value("uses-sdk", "maxSdkVersion")
    def get_min_sdk_version(self):
        """
        Return the android:minSdkVersion attribute, or None if not declared.

        :rtype: string
        """
        return self.get_attribute_value("uses-sdk", "minSdkVersion")
    def get_target_sdk_version(self):
        """
        Return the android:targetSdkVersion attribute, or None if not declared.

        :rtype: string
        """
        return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
"""
Return the effective targetSdkVersion, always returns int > 0.
If the targetSdkVersion is not set, it defaults to 1. This is
set based on defaults as defined in:
https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
:rtype: int
"""
target_sdk_version = self.get_target_sdk_version()
if not target_sdk_version:
target_sdk_version = self.get_min_sdk_version()
try:
return int(target_sdk_version)
except (ValueError, TypeError):
return 1
    def get_libraries(self):
        """
        Return the android:name attributes for libraries (uses-library tags).

        :rtype: list
        """
        return list(self.get_all_attribute_value("uses-library", "name"))
    def get_features(self):
        """
        Return a list of all android:names found for the tag uses-feature
        in the AndroidManifest.xml

        :return: list
        """
        return list(self.get_all_attribute_value("uses-feature", "name"))
def is_wearable(self):
"""
Checks if this application is build for wearables by
checking if it uses the feature 'android.hardware.type.watch'
See: https://developer.android.com/training/wearables/apps/creating.html for more information.
Not every app is setting this feature (not even the example Google provides),
so it might be wise to not 100% rely on this feature.
:return: True if wearable, False otherwise
"""
return 'android.hardware.type.watch' in self.get_features()
def is_leanback(self):
"""
Checks if this application is build for TV (Leanback support)
by checkin if it uses the feature 'android.software.leanback'
:return: True if leanback feature is used, false otherwise
"""
return 'android.software.leanback' in self.get_features()
def is_androidtv(self):
"""
Checks if this application does not require a touchscreen,
as this is the rule to get into the TV section of the Play Store
See: https://developer.android.com/training/tv/start/start.html for more information.
:return: True if 'android.hardware.touchscreen' is not required, False otherwise
"""
return self.get_attribute_value(
'uses-feature', 'name', required="false",
name="android.hardware.touchscreen"
) == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files={}):
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionnary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionnary (key:filename, value:content of the file)
"""
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
# Block one: deleted_files, or deleted_files and new_files
if deleted_files is not None:
if re.match(deleted_files, item.filename) is None:
# if the regex of deleted_files doesn't match the filename
if new_files is not False:
if item.filename in new_files:
# and if the filename is in new_files
zout.writestr(item, new_files[item.filename])
continue
# Otherwise, write the original file.
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block two: deleted_files is None, new_files is not empty
elif new_files is not False:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block three: deleted_files is None, new_files is empty.
# Just write out the default zip
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
"""
Return the parsed xml object which corresponds to the AndroidManifest.xml file
:rtype: :class:`~lxml.etree.Element`
"""
try:
return self.xml["AndroidManifest.xml"]
except KeyError:
return None
    def get_android_resources(self):
        """
        Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
        object which corresponds to the resources.arsc file

        The parser is created lazily on first access and cached in
        ``self.arsc``; returns None if the APK ships no resources.arsc.

        :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
        """
        try:
            return self.arsc["resources.arsc"]
        except KeyError:
            if "resources.arsc" not in self.zip.namelist():
                # There is a rare case, that no resource file is supplied.
                # Maybe it was added manually, thus we check here
                return None
            self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
            return self.arsc["resources.arsc"]
    def show(self):
        """Print a human-readable summary of the APK to stdout (debug helper)."""
        # Populate the file-type / CRC caches first so the listing is complete.
        self.get_files_types()

        print("FILES: ")
        for i in self.get_files():
            try:
                print("\t", i, self._files[i], "%x" % self.files_crc32[i])
            except KeyError:
                # Entry without a detected type; still print its CRC.
                print("\t", i, "%x" % self.files_crc32[i])

        print("DECLARED PERMISSIONS:")
        declared_permissions = self.get_declared_permissions()
        for i in declared_permissions:
            print("\t", i)

        print("REQUESTED PERMISSIONS:")
        requested_permissions = self.get_permissions()
        for i in requested_permissions:
            print("\t", i)

        print("MAIN ACTIVITY: ", self.get_main_activity())

        print("ACTIVITIES: ")
        activities = self.get_activities()
        for i in activities:
            filters = self.get_intent_filters("activity", i)
            print("\t", i, filters or "")

        print("SERVICES: ")
        services = self.get_services()
        for i in services:
            filters = self.get_intent_filters("service", i)
            print("\t", i, filters or "")

        print("RECEIVERS: ")
        receivers = self.get_receivers()
        for i in receivers:
            filters = self.get_intent_filters("receiver", i)
            print("\t", i, filters or "")

        print("PROVIDERS: ", self.get_providers())
    @property
    def application(self):
        """The application label; alias for :meth:`get_app_name`."""
        return self.get_app_name()
    @property
    def packagename(self):
        """The package name; alias for :meth:`get_package`."""
        return self.get_package()
    @property
    def version_name(self):
        """The android:versionName; alias for :meth:`get_androidversion_name`."""
        return self.get_androidversion_name()
    @property
    def version_code(self):
        """The android:versionCode; alias for :meth:`get_androidversion_code`."""
        return self.get_androidversion_code()
    @property
    def icon_info(self):
        """The app icon file name inside the APK; alias for :meth:`get_app_icon`."""
        return self.get_app_icon()
@property
def icon_data(self):
app_icon_file = self.get_app_icon()
app_icon_data = None
try:
app_icon_data = self.get_file(app_icon_file)
except FileNotPresent:
try:
app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
except FileNotPresent:
pass
return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK._get_file_magic_name | python | def _get_file_magic_name(self, buffer):
default = "Unknown"
ftype = None
try:
# Magic is optional
import magic
except ImportError:
return default
try:
# There are several implementations of magic,
# unfortunately all called magic
# We use this one: https://github.com/ahupp/python-magic/
getattr(magic, "MagicException")
except AttributeError:
# Looks like no magic was installed
return default
try:
ftype = magic.from_buffer(buffer[:1024])
except magic.MagicError as e:
log.exception("Error getting the magic type!")
return default
if not ftype:
return default
else:
return self._patch_magic(buffer, ftype) | Return the filetype guessed for a buffer
:param buffer: bytes
:return: str of filetype | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L451-L484 | [
"def _patch_magic(self, buffer, orig):\n \"\"\"\n Overwrite some probably wrong detections by mime libraries\n\n :param buffer: bytes of the file to detect\n :param orig: guess by mime libary\n :return: corrected guess\n \"\"\"\n if (\"Zip\" in orig) or ('(JAR)' in orig):\n val = is_andr... | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
"""
This class can access to all elements in an APK file
example::
APK("myfile.apk")
APK(read("myfile.apk"), raw=True)
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param magic_file: specify the magic file (not used anymore - legacy only)
:param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
:param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
Throw an exception on failure (default False)
:type filename: string
:type raw: boolean
:type magic_file: string
:type skip_analysis: boolean
:type testzip: boolean
"""
if magic_file:
log.warning("You set magic_file but this parameter is actually unused. You should remove it.")
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.uses_permissions = []
self.declared_permissions = {}
self.valid_apk = False
self._files = {}
self.files_crc32 = {}
if raw is True:
self.__raw = bytearray(filename)
self._sha256 = hashlib.sha256(self.__raw).hexdigest()
# Set the filename to something sane
self.filename = "raw_apk_sha256:{}".format(self._sha256)
else:
self.__raw = bytearray(read(filename))
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
if testzip:
# Test the zipfile for integrity before continuing.
# This process might be slow, as the whole file is read.
# Therefore it is possible to enable it as a separate feature.
#
# A short benchmark showed, that testing the zip takes about 10 times longer!
# e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
# testzip takes 0.1s!
ret = self.zip.testzip()
if ret is not None:
# we could print the filename here, but there are zip which are so broken
# That the filename is either very very long or does not make any sense.
# Thus we do not do it, the user might find out by using other tools.
raise BrokenAPKError("The APK is probably broken: testzip returned an error.")
if not skip_analysis:
self._apk_analysis()
def _ns(self, name):
    """Return *name* qualified with the Android XML namespace prefix."""
    return "{}{}".format(NS_ANDROID, name)
def _apk_analysis(self):
    """
    Run analysis on the APK file.

    This method is usually called by __init__ unless skip_analysis is True.
    It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
    extracted from the Manifest.
    """
    i = "AndroidManifest.xml"
    try:
        manifest_data = self.zip.read(i)
    except KeyError:
        # No manifest entry in the ZIP container: not a (complete) APK.
        log.warning("Missing AndroidManifest.xml. Is this an APK file?")
    else:
        ap = AXMLPrinter(manifest_data)

        if not ap.is_valid():
            # Binary AXML could not be decoded; leave valid_apk False.
            log.error("Error while parsing AndroidManifest.xml - is the file valid?")
            return

        self.axml[i] = ap
        self.xml[i] = self.axml[i].get_xml_obj()

        if self.axml[i].is_packed():
            log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")

        if self.xml[i] is not None:
            if self.xml[i].tag != "manifest":
                log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
                return

            # Basic identification fields from the <manifest> element.
            self.package = self.get_attribute_value("manifest", "package")
            self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
            self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
            # Deduplicate requested permissions but keep a plain list.
            permission = list(self.get_all_attribute_value("uses-permission", "name"))
            self.permissions = list(set(self.permissions + permission))

            # Collect (name, maxSdkVersion) pairs for each <uses-permission>.
            for uses_permission in self.find_tags("uses-permission"):
                self.uses_permissions.append([
                    self.get_value_from_tag(uses_permission, "name"),
                    self._get_permission_maxsdk(uses_permission)
                ])

            # getting details of the declared permissions
            for d_perm_item in self.find_tags('permission'):
                # Each attribute may be a "@string/..." reference; resolve it.
                d_perm_name = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "name")))
                d_perm_label = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "label")))
                d_perm_description = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "description")))
                d_perm_permissionGroup = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
                d_perm_protectionLevel = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "protectionLevel")))

                d_perm_details = {
                    "label": d_perm_label,
                    "description": d_perm_description,
                    "permissionGroup": d_perm_permissionGroup,
                    "protectionLevel": d_perm_protectionLevel,
                }
                self.declared_permissions[d_perm_name] = d_perm_details

            # Only mark the APK valid once the manifest was fully processed.
            self.valid_apk = True
def __getstate__(self):
    """
    Function for pickling APK Objects.

    We remove the zip from the Object, as it is not pickable
    And it does not make any sense to pickle it anyways.

    :return: the picklable APK Object without zip.
    """
    # Work on a *copy*: the previous code mutated self.__dict__ directly,
    # so merely pickling an APK destroyed the live object's `zip` attribute
    # and replaced its `axml`/`xml` dicts with strings.
    x = self.__dict__.copy()
    x['axml'] = str(x['axml'])
    x['xml'] = str(x['xml'])
    del x['zip']
    return x
def __setstate__(self, state):
    """
    Load a pickled APK Object and restore the state

    We load the zip file back by reading __raw from the Object.

    :param state: pickled state
    """
    # `state` carries the name-mangled key `_APK__raw` written during
    # pickling; the ZipFile is rebuilt from those raw bytes because
    # ZipFile handles themselves are not picklable.
    self.__dict__ = state

    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
    """
    Resolve a manifest value of the form ``@string/<key>`` against the
    APK's string resources; any other value is returned unchanged.

    :param string: raw attribute value from the manifest
    :return: the resolved string, or '' when it cannot be resolved
    """
    if not string.startswith('@string/'):
        return string
    # len('@string/') == 8 -- the previous slice started at 9 and silently
    # dropped the first character of every resource key.
    string_key = string[8:]

    res_parser = self.get_android_resources()
    if not res_parser:
        return ''

    string_value = ''
    for package_name in res_parser.get_packages_names():
        extracted_values = res_parser.get_string(package_name, string_key)
        if extracted_values:
            # get_string returns a (key, value) pair; keep the value only.
            string_value = extracted_values[1]
            break
    return string_value
def _get_permission_maxsdk(self, item):
    """
    Return the android:maxSdkVersion of a <uses-permission> tag as an int,
    or None when the attribute is absent or not a valid number.

    :param item: the <uses-permission> element
    """
    value = self.get_value_from_tag(item, "maxSdkVersion")
    if value is None:
        # Attribute not present at all (the old int(None) TypeError path).
        return None
    try:
        return int(value)
    except ValueError:
        # The previous code logged get_max_sdk_version() (the <uses-sdk>
        # attribute, i.e. the wrong value) and raised TypeError when that
        # was None. Log the offending attribute value instead.
        log.warning("'%s' is not a valid value for <uses-permission> maxSdkVersion", value)
        return None
def is_valid_APK(self):
    """
    Return True if the APK could be parsed, False otherwise.

    An APK counts as valid when its AndroidManifest.xml was parsed
    successfully; this says nothing about the signature nor about whether
    the APK is installable on an Android system.

    :rtype: boolean
    """
    return bool(self.valid_apk)
def get_filename(self):
    """
    Return the filename (or the pseudo-filename generated for raw input)
    of this APK.

    :rtype: :class:`str`
    """
    return self.filename
def get_app_name(self):
    """
    Return the appname of the APK

    This name is read from the AndroidManifest.xml
    using the application android:label.
    If no label exists, the android:label of the main activity is used.

    If there is also no main activity label, an empty string is returned.

    :rtype: :class:`str`
    """
    app_name = self.get_attribute_value('application', 'label')
    if app_name is None:
        # No application-level label: fall back to the main activity's label.
        activities = self.get_main_activities()
        main_activity_name = None
        if len(activities) > 0:
            main_activity_name = activities.pop()
        app_name = self.get_attribute_value(
            'activity', 'label', name=main_activity_name
        )

    if app_name is None:
        # No App name set
        # TODO return packagename instead?
        log.warning("It looks like that no app name is set for the main activity!")
        return ""

    if app_name.startswith("@"):
        # The label is a resource reference and must be resolved through
        # resources.arsc.
        res_parser = self.get_android_resources()
        if not res_parser:
            # TODO: What should be the correct return value here?
            return app_name

        res_id, package = res_parser.parse_id(app_name)

        # If the package name is the same as the APK package,
        # we should be able to resolve the ID.
        if package and package != self.get_package():
            if package == 'android':
                # TODO: we can not resolve this, as we lack framework-res.apk
                # one exception would be when parsing framework-res.apk directly.
                log.warning("Resource ID with android package name encountered! "
                            "Will not resolve, framework-res.apk would be required.")
                return app_name
            else:
                # TODO should look this up, might be in the resources
                log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                return app_name

        try:
            # Resolve using the default (empty) resource configuration.
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            log.warning("Exception selecting app name: %s" % e)
    return app_name
def get_app_icon(self, max_dpi=65536):
    """
    Return the first icon file name, which density is not greater than max_dpi,
    unless exact icon resolution is set in the manifest, in which case
    return the exact file.

    This information is read from the AndroidManifest.xml

    From https://developer.android.com/guide/practices/screens_support.html
    and https://developer.android.com/ndk/reference/group___configuration.html

    * DEFAULT 0dpi
    * ldpi (low) 120dpi
    * mdpi (medium) 160dpi
    * TV 213dpi
    * hdpi (high) 240dpi
    * xhdpi (extra-high) 320dpi
    * xxhdpi (extra-extra-high) 480dpi
    * xxxhdpi (extra-extra-extra-high) 640dpi
    * anydpi 65534dpi (0xFFFE)
    * nodpi 65535dpi (0xFFFF)

    There is a difference between nodpi and anydpi:
    nodpi will be used if no other density is specified. Or the density does not match.
    nodpi is the fallback for everything else. If there is a resource that matches the DPI,
    this is used.
    anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
    Therefore anydpi is usually used with vector graphics and with constraints on the API level.
    For example adaptive icons are usually marked as anydpi.

    When it comes now to selecting an icon, there is the following flow:

    1) is there an anydpi icon?
    2) is there an icon for the dpi of the device?
    3) is there a nodpi icon?
    4) (only on very old devices) is there a icon with dpi 0 (the default)

    For more information read here: https://stackoverflow.com/a/34370735/446140

    :rtype: :class:`str`
    """
    main_activity_name = self.get_main_activity()

    # Activity-level icon takes precedence over the application icon.
    app_icon = self.get_attribute_value(
        'activity', 'icon', name=main_activity_name)

    if not app_icon:
        app_icon = self.get_attribute_value('application', 'icon')

    res_parser = self.get_android_resources()
    if not res_parser:
        # Can not do anything below this point to resolve...
        return None

    # Fall back to the conventional launcher resource names.
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        # If the icon can not be found, return now
        return None

    if app_icon.startswith("@"):
        # Resource reference: pick the candidate with the highest density
        # that does not exceed max_dpi.
        res_id = int(app_icon[1:], 16)
        candidates = res_parser.get_resolved_res_configs(res_id)

        app_icon = None
        current_dpi = -1

        try:
            for config, file_name in candidates:
                dpi = config.get_density()
                if current_dpi < dpi <= max_dpi:
                    app_icon = file_name
                    current_dpi = dpi
        except Exception as e:
            log.warning("Exception selecting app icon: %s" % e)

    return app_icon
def get_package(self):
    """
    Return the package name declared in the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    return self.package
def get_androidversion_code(self):
    """
    Return the android versionCode read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    return self.androidversion["Code"]
def get_androidversion_name(self):
    """
    Return the android versionName read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    return self.androidversion["Name"]
def get_files(self):
    """
    Return the names of all entries in the APK's ZIP container.

    :rtype: a list of :class:`str`
    """
    return list(self.zip.namelist())
@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    Lazy alias for :meth:`get_files_types`.

    :return: dictionary of files and their mime type
    """
    return self.get_files_types()
def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    Results are computed once and cached in ``self._files``; the CRC32
    cache (``self.files_crc32``) is populated as a side effect.

    :rtype: a dictionary of {filename: detected type}
    """
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)

    return self._files
def _patch_magic(self, buffer, orig):
    """
    Overwrite some probably wrong detections by mime libraries

    :param buffer: bytes of the file to detect
    :param orig: guess by mime library
    :return: corrected guess
    """
    if ("Zip" in orig) or ('(JAR)' in orig):
        # Generic ZIP/JAR guesses may actually be nested APKs; double-check
        # the raw content before accepting the mime library's verdict.
        val = is_android_raw(buffer)
        if val == "APK":
            return "Android application package file"

    return orig
def get_files_crc32(self):
    """
    Calculate (once) and return a dictionary mapping each filename in the
    APK to its CRC32 checksum.

    :return: dict of filename: CRC32
    """
    if not self.files_crc32:
        for name in self.get_files():
            self.files_crc32[name] = crc32(self.zip.read(name))
    return self.files_crc32
def get_files_information(self):
    """
    Yield (filename, detected type, crc32) triples for every file in the APK.

    :rtype: str, str, int
    """
    types = self.get_files_types()
    crcs = self.get_files_crc32()
    for name in self.get_files():
        yield name, types[name], crcs[name]
def get_raw(self):
    """
    Return raw bytes of the APK

    NOTE: this hands out the internal bytearray; mutating the returned
    object would corrupt this APK instance.

    :rtype: bytes
    """
    return self.__raw
def get_file(self, filename):
    """
    Return the raw data of *filename* inside the APK.

    :raises FileNotPresent: when the entry does not exist in the ZIP
    :rtype: bytes
    """
    try:
        data = self.zip.read(filename)
    except KeyError:
        raise FileNotPresent(filename)
    return data
def get_dex(self):
    """
    Return the raw data of the classes dex file

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.

    :rtype: bytes
    """
    try:
        return self.get_file("classes.dex")
    except FileNotPresent:
        # Return empty *bytes* (the documented return type) rather than the
        # str "" the old code returned; both are falsy, so existing
        # truthiness checks keep working.
        return b""
def get_dex_names(self):
    """
    Return the names of all DEX files found in the APK.

    This method only accounts for "official" dex files, i.e. all files
    in the root directory of the APK named classes.dex or classes[0-9]+.dex

    :rtype: a list of str
    """
    # Raw string, escaped dot, end anchor: the old pattern
    # "classes(\\d*).dex" also matched names such as "classes.dexx".
    # Return a real list (as documented) instead of a lazy filter object.
    dexre = re.compile(r"^classes(\d*)\.dex$")
    return [name for name in self.get_files() if dexre.match(name)]
def get_all_dex(self):
    """
    Yield the raw bytes of every classes dex file in the APK.

    :rtype: a generator of bytes
    """
    for name in self.get_dex_names():
        yield self.get_file(name)
def is_multidex(self):
    """
    Test if the APK has multiple DEX files

    :return: True if multiple dex found, otherwise False
    """
    # Raw string with an escaped dot: the old pattern's unescaped "."
    # matched any character, so e.g. "classes!dex" counted as a dex file.
    dexre = re.compile(r"^classes(\d+)?\.dex$")
    return sum(1 for name in self.get_files() if dexre.search(name)) > 1
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    Deprecated: use `get_all_attribute_value()` instead

    Return elements in xml files which match with the tag name and the specific attribute

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    # The old code decorated this method with @DeprecationWarning, which
    # *replaced* it with an exception instance and made it uncallable.
    # Emit a proper runtime warning instead.
    import warnings
    warnings.warn("get_elements() is deprecated, use get_all_attribute_value() instead",
                  DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        for item in self.xml[i].findall('.//' + tag_name):
            if with_namespace:
                value = item.get(self._ns(attribute))
            else:
                value = item.get(attribute)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
def _format_value(self, value):
    """
    Fully qualify a component name from the manifest with the package name.

    * ".Foo" -> "<package>.Foo"
    * "Foo"  -> "<package>.Foo" (no dot at all)
    * "com.example.Foo" and "" stay unchanged.

    :param value: the raw manifest value
    :return: the qualified value
    """
    if len(value) > 0:
        if value[0] == ".":
            value = self.package + value
        elif "." not in value:
            # Bare class name without any dot. (The old `v_dot == 0` branch
            # was unreachable: a leading dot is already handled above.)
            value = self.package + "." + value
    return value
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    :Deprecated: use `get_attribute_value()` instead

    Return element in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :rtype: string
    """
    # The old @DeprecationWarning decorator replaced this method with an
    # (uncallable) exception instance; warn at runtime instead.
    import warnings
    warnings.warn("get_element() is deprecated, use get_attribute_value() instead",
                  DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        tag = self.xml[i].findall('.//' + tag_name)
        if len(tag) == 0:
            # NOTE: preserved quirk -- gives up entirely as soon as one xml
            # file lacks the tag, even if a later file would match.
            return None
        for item in tag:
            skip_this_item = False
            for attr, val in list(attribute_filter.items()):
                attr_val = item.get(self._ns(attr))
                if attr_val != val:
                    skip_this_item = True
                    break

            if skip_this_item:
                continue

            value = item.get(self._ns(attribute))

            if value is not None:
                return value
    return None
def get_all_attribute_value(
    self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Yield every value of *attribute* on tags named *tag_name* across all
    parsed xml files, optionally qualified with the package name.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    for tag in self.find_tags(tag_name, **attribute_filter):
        # Plain attribute first, android-namespaced attribute as fallback.
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is None:
            continue
        yield self._format_value(value) if format_value else value
def get_attribute_value(
    self, tag_name, attribute, format_value=False, **attribute_filter
):
    """
    Return the first value of *attribute* on a tag named *tag_name*, or
    None when no matching tag carries the attribute.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    values = self.get_all_attribute_value(
        tag_name, attribute, format_value, **attribute_filter)
    return next((v for v in values if v is not None), None)
def get_value_from_tag(self, tag, attribute):
    """
    Return the value of *attribute* on *tag*, trying the android namespace
    first and falling back to the plain attribute name.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    value = tag.get(self._ns(attribute))
    if value is not None:
        return value
    log.warning("Failed to get the attribute with namespace")
    return tag.get(attribute)
def find_tags(self, tag_name, **attribute_filter):
    """
    Return a flat list of all matching tags collected from every parsed xml.

    :param tag_name: specify the tag name
    :type tag_name: string
    """
    matched = []
    for xml_name in self.xml:
        matched.extend(
            self.find_tags_from_xml(xml_name, tag_name, **attribute_filter))
    return matched
def find_tags_from_xml(
    self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    if xml.tag == tag_name:
        # Pass the element itself, not xml.tag (a str): the old code handed
        # the tag *name* to is_tag_matched, which crashed on `tag.get(...)`
        # as soon as an attribute_filter was supplied for the root tag.
        if self.is_tag_matched(xml, **attribute_filter):
            return [xml]
        return []
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags
        if self.is_tag_matched(tag, **attribute_filter)
    ]
def is_tag_matched(self, tag, **attribute_filter):
    """
    Return True when *tag* carries every attribute/value pair given in
    *attribute_filter*; an empty filter matches everything.

    :param tag: specify the tag element
    :type tag: Element
    """
    if not attribute_filter:
        return True
    for attr, expected in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        actual = tag.get(self._ns(attr))
        if actual is None:
            log.warning("Failed to get the attribute with namespace")
            actual = tag.get(attr)
        if actual != expected:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml

    :rtype: a set of str
    """
    x = set()  # activities declaring action android.intent.action.MAIN
    y = set()  # activities declaring category android.intent.category.LAUNCHER

    for i in self.xml:
        if self.xml[i] is None:
            continue
        # activity-alias elements can serve as launchers too.
        activities_and_aliases = self.xml[i].findall(".//activity") + \
                                 self.xml[i].findall(".//activity-alias")

        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue

            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')

            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')

    # A "main" activity is one that is both MAIN and LAUNCHER.
    return x.intersection(y)
def get_main_activity(self):
    """
    Return the fully-qualified name of one main activity, or None when the
    manifest declares no MAIN/LAUNCHER activity.

    :rtype: str
    """
    activities = self.get_main_activities()
    if not activities:
        return None
    return self._format_value(activities.pop())
def get_activities(self):
    """
    Return the android:name attribute of all activities

    :rtype: a list of str
    """
    return [name for name in self.get_all_attribute_value("activity", "name")]
def get_services(self):
    """
    Return the android:name attribute of all services

    :rtype: a list of str
    """
    return [name for name in self.get_all_attribute_value("service", "name")]
def get_receivers(self):
    """
    Return the android:name attribute of all receivers

    :rtype: a list of string
    """
    return [name for name in self.get_all_attribute_value("receiver", "name")]
def get_providers(self):
    """
    Return the android:name attribute of all providers

    :rtype: a list of string
    """
    return [name for name in self.get_all_attribute_value("provider", "name")]
def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    d = {"action": [], "category": []}

    for i in self.xml:
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[i].findall(".//" + itemtype):
            # Compare against the package-qualified component name.
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))

    # Empty keys are dropped, so callers may receive {}, or a dict with
    # only one of the two keys.
    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]

    return d
def get_permissions(self):
    """
    Return permissions names declared in the AndroidManifest.xml.

    It is possible that permissions are returned multiple times,
    as this function does not filter the permissions, i.e. it shows you
    exactly what was defined in the AndroidManifest.xml.

    Implied permissions, which are granted automatically, are not returned
    here. Use :meth:`get_uses_implied_permission_list` if you need a list
    of implied permissions.

    :returns: A list of permissions
    :rtype: list
    """
    return self.permissions
def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or other permissions.

    Mirrors the implicit permission-grandfathering rules documented at
    https://developer.android.com/guide/topics/manifest/uses-sdk-element

    :rtype: list of string
    """
    target_sdk_version = self.get_effective_target_sdk_version()

    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'

    implied = []  # list of [permission, maxSdkVersion or None]

    implied_WRITE_EXTERNAL_STORAGE = False
    # Apps targeting SDK < 4 implicitly receive storage and phone-state access.
    if target_sdk_version < 4:
        if WRITE_EXTERNAL_STORAGE not in self.permissions:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            implied_WRITE_EXTERNAL_STORAGE = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])

    # WRITE access (declared or implied) implies READ access, inheriting
    # the maxSdkVersion of the WRITE declaration when present.
    if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
            and READ_EXTERNAL_STORAGE not in self.permissions:
        maxSdkVersion = None
        for name, version in self.uses_permissions:
            if name == WRITE_EXTERNAL_STORAGE:
                maxSdkVersion = version
                break
        implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])

    # Apps targeting SDK < 16 with contacts access also get call-log access.
    if target_sdk_version < 16:
        if READ_CONTACTS in self.permissions \
                and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions \
                and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])

    return implied
def get_details_permissions(self):
    """
    Return permissions with details

    :rtype: dict of {permission: [protectionLevel, label, description]}
    """
    details = {}
    for perm in self.permissions:
        if perm in self.permission_module:
            info = self.permission_module[perm]
            details[perm] = [info["protectionLevel"], info["label"], info["description"]]
        else:
            # FIXME: the permission might be signature, if it is defined by the app itself!
            details[perm] = ["normal", "Unknown permission from android reference",
                            "Unknown permission from android reference"]
    return details
def get_requested_permissions(self):
    """
    Returns all requested permissions.

    It has the same result as :meth:`get_permissions` and might be removed in the future

    :rtype: list of str
    """
    # @DeprecationWarning used as a decorator turned this method into an
    # exception instance, so calling it raised TypeError. Emit a real
    # runtime DeprecationWarning instead.
    import warnings
    warnings.warn("get_requested_permissions() is deprecated, use get_permissions() instead",
                  DeprecationWarning, stacklevel=2)
    return self.get_permissions()
def get_requested_aosp_permissions(self):
    """
    Returns requested permissions declared within AOSP project.

    This includes several other permissions as well, which are in the platform apps.

    :rtype: list of str
    """
    return [perm for perm in self.get_permissions()
            if perm in self.permission_module]
def get_requested_aosp_permissions_details(self):
    """
    Returns requested aosp permissions with details.

    Permissions unknown to the AOSP reference are silently skipped.

    :rtype: dictionary
    """
    details = {}
    for perm in self.permissions:
        if perm in self.permission_module:
            details[perm] = self.permission_module[perm]
    return details
def get_requested_third_party_permissions(self):
    """
    Returns list of requested permissions not declared within AOSP project.

    :rtype: list of strings
    """
    return [perm for perm in self.get_permissions()
            if perm not in self.permission_module]
def get_declared_permissions(self):
    """
    Returns list of the declared permissions.

    :rtype: list of strings
    """
    return list(self.declared_permissions)
def get_declared_permissions_details(self):
    """
    Returns declared permissions with the details.

    :rtype: dict
    """
    return self.declared_permissions
def get_max_sdk_version(self):
    """
    Return the android:maxSdkVersion attribute

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "maxSdkVersion")
def get_min_sdk_version(self):
    """
    Return the android:minSdkVersion attribute

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "minSdkVersion")
def get_target_sdk_version(self):
    """
    Return the android:targetSdkVersion attribute

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion, always returns int > 0.

    If the targetSdkVersion is not set, it defaults to 1. This is
    set based on defaults as defined in:
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    declared = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(declared)
    except (ValueError, TypeError):
        return 1
def get_libraries(self):
    """
    Return the android:name attributes for libraries

    :rtype: list
    """
    return [name for name in self.get_all_attribute_value("uses-library", "name")]
def get_features(self):
    """
    Return a list of all android:names found for the tag uses-feature
    in the AndroidManifest.xml

    :return: list
    """
    return [name for name in self.get_all_attribute_value("uses-feature", "name")]
def is_wearable(self):
    """
    Checks if this application is build for wearables by
    checking if it uses the feature 'android.hardware.type.watch'
    See: https://developer.android.com/training/wearables/apps/creating.html for more information.

    Not every app is setting this feature (not even the example Google provides),
    so it might be wise to not 100% rely on this feature.

    :return: True if wearable, False otherwise
    """
    features = self.get_features()
    return 'android.hardware.type.watch' in features
def is_leanback(self):
    """
    Checks if this application is build for TV (Leanback support)
    by checking if it uses the feature 'android.software.leanback'

    :return: True if leanback feature is used, false otherwise
    """
    features = self.get_features()
    return 'android.software.leanback' in features
def is_androidtv(self):
    """
    Checks if this application does not require a touchscreen,
    as this is the rule to get into the TV section of the Play Store
    See: https://developer.android.com/training/tv/start/start.html for more information.

    :return: True if 'android.hardware.touchscreen' is not required, False otherwise
    """
    # get_attribute_value returns the matched feature's android:name only if
    # a <uses-feature android:name="android.hardware.touchscreen"
    # android:required="false"/> tag exists (None otherwise), so comparing
    # against the literal name tests for exactly that declaration.
    return self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen"
    ) == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files=None):
    """
    Create a new zip file

    :param filename: the output filename (or file-like object) of the zip
    :param deleted_files: a regex pattern to remove specific file
    :param new_files: a dictionary of new files
    :type filename: string
    :type deleted_files: None or a string
    :type new_files: a dictionary (key:filename, value:content of the file)
    """
    # `new_files={}` was a mutable default argument; use None and create a
    # fresh dict per call instead (same behavior for callers).
    if new_files is None:
        new_files = {}
    # Context manager guarantees the output zip is closed even on error.
    with zipfile.ZipFile(filename, 'w') as zout:
        for item in self.zip.infolist():
            # Block one: deleted_files, or deleted_files and new_files
            if deleted_files is not None:
                if re.match(deleted_files, item.filename) is None:
                    # if the regex of deleted_files doesn't match the filename
                    if item.filename in new_files:
                        # and if the filename is in new_files
                        zout.writestr(item, new_files[item.filename])
                        continue
                    # Otherwise, write the original file.
                    buffer = self.zip.read(item.filename)
                    zout.writestr(item, buffer)
            # Block two: deleted_files is None -- replace entries listed in
            # new_files, copy everything else verbatim.
            elif item.filename in new_files:
                zout.writestr(item, new_files[item.filename])
            else:
                buffer = self.zip.read(item.filename)
                zout.writestr(item, buffer)
def get_android_manifest_axml(self):
    """
    Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file,
    or None when no manifest was parsed.

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
    """
    return self.axml.get("AndroidManifest.xml")
def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the AndroidManifest.xml file,
    or None when no manifest was parsed.

    :rtype: :class:`~lxml.etree.Element`
    """
    return self.xml.get("AndroidManifest.xml")
def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    The parser is created lazily on first access and cached in
    ``self.arsc``; returns None when the APK ships no resources.arsc.

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
    """Print a human-readable summary of the APK (files, permissions,
    activities, services, receivers, providers) to stdout."""
    self.get_files_types()  # populate the _files / files_crc32 caches

    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # _files may lack an entry when type detection failed for it.
            print("\t", i, "%x" % self.files_crc32[i])

    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)

    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)

    print("MAIN ACTIVITY: ", self.get_main_activity())

    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")

    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")

    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")

    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """Convenience alias for :meth:`get_app_name`."""
    return self.get_app_name()
@property
def packagename(self):
    """Convenience alias for :meth:`get_package`."""
    return self.get_package()
@property
def version_name(self):
    """Convenience alias for :meth:`get_androidversion_name`."""
    return self.get_androidversion_name()
@property
def version_code(self):
    """Convenience alias for :meth:`get_androidversion_code`."""
    return self.get_androidversion_code()
@property
def icon_info(self):
    """Convenience alias for :meth:`get_app_icon` (the icon file name)."""
    return self.get_app_icon()
@property
def icon_data(self):
    """
    Raw bytes of the application icon file, or None when it cannot be read.
    """
    # NOTE(review): get_app_icon() may return None; get_file(None) would then
    # raise an uncaught error here -- confirm against callers.
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Zip entry names may be stored with cp437-mangled encoding.
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            pass
    return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK.get_files_types | python | def get_files_types(self):
if self._files == {}:
# Generate File Types / CRC List
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
# FIXME why not use the crc from the zipfile?
# should be validated as well.
# crc = self.zip.getinfo(i).CRC
self._files[i] = self._get_file_magic_name(buffer)
return self._files | Return the files inside the APK with their associated types (by using python-magic)
:rtype: a dictionnary | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L495-L511 | [
"def get_files(self):\n \"\"\"\n Return the file names inside the APK.\n\n :rtype: a list of :class:`str`\n \"\"\"\n return self.zip.namelist()\n",
"def _get_file_magic_name(self, buffer):\n \"\"\"\n Return the filetype guessed for a buffer\n :param buffer: bytes\n :return: str of filet... | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
"""
This class can access to all elements in an APK file
example::
APK("myfile.apk")
APK(read("myfile.apk"), raw=True)
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param magic_file: specify the magic file (not used anymore - legacy only)
:param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
:param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
Throw an exception on failure (default False)
:type filename: string
:type raw: boolean
:type magic_file: string
:type skip_analysis: boolean
:type testzip: boolean
"""
if magic_file:
log.warning("You set magic_file but this parameter is actually unused. You should remove it.")
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.uses_permissions = []
self.declared_permissions = {}
self.valid_apk = False
self._files = {}
self.files_crc32 = {}
if raw is True:
self.__raw = bytearray(filename)
self._sha256 = hashlib.sha256(self.__raw).hexdigest()
# Set the filename to something sane
self.filename = "raw_apk_sha256:{}".format(self._sha256)
else:
self.__raw = bytearray(read(filename))
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
if testzip:
# Test the zipfile for integrity before continuing.
# This process might be slow, as the whole file is read.
# Therefore it is possible to enable it as a separate feature.
#
# A short benchmark showed, that testing the zip takes about 10 times longer!
# e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
# testzip takes 0.1s!
ret = self.zip.testzip()
if ret is not None:
# we could print the filename here, but there are zip which are so broken
# That the filename is either very very long or does not make any sense.
# Thus we do not do it, the user might find out by using other tools.
raise BrokenAPKError("The APK is probably broken: testzip returned an error.")
if not skip_analysis:
self._apk_analysis()
def _ns(self, name):
    """
    Qualify *name* with the Android XML namespace used in the manifest.
    """
    qualified = NS_ANDROID + name
    return qualified
def _apk_analysis(self):
"""
Run analysis on the APK file.
This method is usually called by __init__ except if skip_analysis is False.
It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
extracted from the Manifest.
"""
i = "AndroidManifest.xml"
try:
manifest_data = self.zip.read(i)
except KeyError:
log.warning("Missing AndroidManifest.xml. Is this an APK file?")
else:
ap = AXMLPrinter(manifest_data)
if not ap.is_valid():
log.error("Error while parsing AndroidManifest.xml - is the file valid?")
return
self.axml[i] = ap
self.xml[i] = self.axml[i].get_xml_obj()
if self.axml[i].is_packed():
log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")
if self.xml[i] is not None:
if self.xml[i].tag != "manifest":
log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
return
self.package = self.get_attribute_value("manifest", "package")
self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
permission = list(self.get_all_attribute_value("uses-permission", "name"))
self.permissions = list(set(self.permissions + permission))
for uses_permission in self.find_tags("uses-permission"):
self.uses_permissions.append([
self.get_value_from_tag(uses_permission, "name"),
self._get_permission_maxsdk(uses_permission)
])
# getting details of the declared permissions
for d_perm_item in self.find_tags('permission'):
d_perm_name = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "name")))
d_perm_label = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "label")))
d_perm_description = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "description")))
d_perm_permissionGroup = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
d_perm_protectionLevel = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "protectionLevel")))
d_perm_details = {
"label": d_perm_label,
"description": d_perm_description,
"permissionGroup": d_perm_permissionGroup,
"protectionLevel": d_perm_protectionLevel,
}
self.declared_permissions[d_perm_name] = d_perm_details
self.valid_apk = True
def __getstate__(self):
"""
Function for pickling APK Objects.
We remove the zip from the Object, as it is not pickable
And it does not make any sense to pickle it anyways.
:return: the picklable APK Object without zip.
"""
# Upon pickling, we need to remove the ZipFile
x = self.__dict__
x['axml'] = str(x['axml'])
x['xml'] = str(x['xml'])
del x['zip']
return x
def __setstate__(self, state):
"""
Load a pickled APK Object and restore the state
We load the zip file back by reading __raw from the Object.
:param state: pickled state
"""
self.__dict__ = state
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
if not string.startswith('@string/'):
return string
string_key = string[9:]
res_parser = self.get_android_resources()
if not res_parser:
return ''
string_value = ''
for package_name in res_parser.get_packages_names():
extracted_values = res_parser.get_string(package_name, string_key)
if extracted_values:
string_value = extracted_values[1]
break
return string_value
def _get_permission_maxsdk(self, item):
    """
    Read the android:maxSdkVersion attribute of a <uses-permission> tag.

    :param item: the <uses-permission> element
    :return: maxSdkVersion as int, or None if absent or unparsable
    """
    maxSdkVersion = None
    value = self.get_value_from_tag(item, "maxSdkVersion")
    try:
        maxSdkVersion = int(value)
    except ValueError:
        # The previous message was built from get_max_sdk_version() --
        # the wrong tag -- and raised TypeError when that returned None.
        log.warning("'%s' is not a valid value for <uses-permission> maxSdkVersion" % value)
    except TypeError:
        # Attribute not present at all: int(None) lands here.
        pass
    return maxSdkVersion
def is_valid_APK(self):
"""
Return true if the APK is valid, false otherwise.
An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
This does not mean that the APK has a valid signature nor that the APK
can be installed on an Android system.
:rtype: boolean
"""
return self.valid_apk
def get_filename(self):
"""
Return the filename of the APK
:rtype: :class:`str`
"""
return self.filename
def get_app_name(self):
"""
Return the appname of the APK
This name is read from the AndroidManifest.xml
using the application android:label.
If no label exists, the android:label of the main activity is used.
If there is also no main activity label, an empty string is returned.
:rtype: :class:`str`
"""
app_name = self.get_attribute_value('application', 'label')
if app_name is None:
activities = self.get_main_activities()
main_activity_name = None
if len(activities) > 0:
main_activity_name = activities.pop()
app_name = self.get_attribute_value(
'activity', 'label', name=main_activity_name
)
if app_name is None:
# No App name set
# TODO return packagename instead?
log.warning("It looks like that no app name is set for the main activity!")
return ""
if app_name.startswith("@"):
res_parser = self.get_android_resources()
if not res_parser:
# TODO: What should be the correct return value here?
return app_name
res_id, package = res_parser.parse_id(app_name)
# If the package name is the same as the APK package,
# we should be able to resolve the ID.
if package and package != self.get_package():
if package == 'android':
# TODO: we can not resolve this, as we lack framework-res.apk
# one exception would be when parsing framework-res.apk directly.
log.warning("Resource ID with android package name encountered! "
"Will not resolve, framework-res.apk would be required.")
return app_name
else:
# TODO should look this up, might be in the resources
log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
return app_name
try:
app_name = res_parser.get_resolved_res_configs(
res_id,
ARSCResTableConfig.default_config())[0][1]
except Exception as e:
log.warning("Exception selecting app name: %s" % e)
return app_name
def get_app_icon(self, max_dpi=65536):
"""
Return the first icon file name, which density is not greater than max_dpi,
unless exact icon resolution is set in the manifest, in which case
return the exact file.
This information is read from the AndroidManifest.xml
From https://developer.android.com/guide/practices/screens_support.html
and https://developer.android.com/ndk/reference/group___configuration.html
* DEFAULT 0dpi
* ldpi (low) 120dpi
* mdpi (medium) 160dpi
* TV 213dpi
* hdpi (high) 240dpi
* xhdpi (extra-high) 320dpi
* xxhdpi (extra-extra-high) 480dpi
* xxxhdpi (extra-extra-extra-high) 640dpi
* anydpi 65534dpi (0xFFFE)
* nodpi 65535dpi (0xFFFF)
There is a difference between nodpi and anydpi:
nodpi will be used if no other density is specified. Or the density does not match.
nodpi is the fallback for everything else. If there is a resource that matches the DPI,
this is used.
anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
Therefore anydpi is usually used with vector graphics and with constraints on the API level.
For example adaptive icons are usually marked as anydpi.
When it comes now to selecting an icon, there is the following flow:
1) is there an anydpi icon?
2) is there an icon for the dpi of the device?
3) is there a nodpi icon?
4) (only on very old devices) is there a icon with dpi 0 (the default)
For more information read here: https://stackoverflow.com/a/34370735/446140
:rtype: :class:`str`
"""
main_activity_name = self.get_main_activity()
app_icon = self.get_attribute_value(
'activity', 'icon', name=main_activity_name)
if not app_icon:
app_icon = self.get_attribute_value('application', 'icon')
res_parser = self.get_android_resources()
if not res_parser:
# Can not do anything below this point to resolve...
return None
if not app_icon:
res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
if res_id:
app_icon = "@%x" % res_id
if not app_icon:
res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
if res_id:
app_icon = "@%x" % res_id
if not app_icon:
# If the icon can not be found, return now
return None
if app_icon.startswith("@"):
res_id = int(app_icon[1:], 16)
candidates = res_parser.get_resolved_res_configs(res_id)
app_icon = None
current_dpi = -1
try:
for config, file_name in candidates:
dpi = config.get_density()
if current_dpi < dpi <= max_dpi:
app_icon = file_name
current_dpi = dpi
except Exception as e:
log.warning("Exception selecting app icon: %s" % e)
return app_icon
def get_package(self):
"""
Return the name of the package
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.package
def get_androidversion_code(self):
"""
Return the android version code
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.androidversion["Code"]
def get_androidversion_name(self):
"""
Return the android version name
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.androidversion["Name"]
def get_files(self):
"""
Return the file names inside the APK.
:rtype: a list of :class:`str`
"""
return self.zip.namelist()
def _get_file_magic_name(self, buffer):
"""
Return the filetype guessed for a buffer
:param buffer: bytes
:return: str of filetype
"""
default = "Unknown"
ftype = None
try:
# Magic is optional
import magic
except ImportError:
return default
try:
# There are several implementations of magic,
# unfortunately all called magic
# We use this one: https://github.com/ahupp/python-magic/
getattr(magic, "MagicException")
except AttributeError:
# Looks like no magic was installed
return default
try:
ftype = magic.from_buffer(buffer[:1024])
except magic.MagicError as e:
log.exception("Error getting the magic type!")
return default
if not ftype:
return default
else:
return self._patch_magic(buffer, ftype)
@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    :return: dictionary of files and their mime type
    """
    # Thin alias over get_files_types(); the result is cached there.
    return self.get_files_types()
def _patch_magic(self, buffer, orig):
"""
Overwrite some probably wrong detections by mime libraries
:param buffer: bytes of the file to detect
:param orig: guess by mime libary
:return: corrected guess
"""
if ("Zip" in orig) or ('(JAR)' in orig):
val = is_android_raw(buffer)
if val == "APK":
return "Android application package file"
return orig
def get_files_crc32(self):
"""
Calculates and returns a dictionary of filenames and CRC32
:return: dict of filename: CRC32
"""
if self.files_crc32 == {}:
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
return self.files_crc32
def get_files_information(self):
    """
    Return the files inside the APK with their associated types and crc32

    Yields one (filename, filetype, crc32) tuple per archive member.
    Both helper maps are cached after the first call, so the repeated
    get_files_types()/get_files_crc32() calls in the loop are cheap.

    :rtype: str, str, int
    """
    for k in self.get_files():
        yield k, self.get_files_types()[k], self.get_files_crc32()[k]
def get_raw(self):
"""
Return raw bytes of the APK
:rtype: bytes
"""
return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
inside the APK
:rtype: bytes
"""
try:
return self.zip.read(filename)
except KeyError:
raise FileNotPresent(filename)
def get_dex(self):
    """
    Return the raw data of the classes dex file

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.

    :rtype: bytes
    """
    try:
        return self.get_file("classes.dex")
    except FileNotPresent:
        # NOTE(review): returns the *str* "" on a missing classes.dex while
        # the success path returns bytes; callers should test truthiness,
        # not equality. Changing this to b"" could break existing callers.
        return ""
def get_dex_names(self):
"""
Return the names of all DEX files found in the APK.
This method only accounts for "offical" dex files, i.e. all files
in the root directory of the APK named classes.dex or classes[0-9]+.dex
:rtype: a list of str
"""
dexre = re.compile("classes(\d*).dex")
return filter(lambda x: dexre.match(x), self.get_files())
def get_all_dex(self):
"""
Return the raw data of all classes dex files
:rtype: a generator of bytes
"""
for dex_name in self.get_dex_names():
yield self.get_file(dex_name)
def is_multidex(self):
"""
Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False
"""
dexre = re.compile("^classes(\d+)?.dex$")
return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1
@DeprecationWarning
def get_elements(self, tag_name, attribute, with_namespace=True):
"""
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value)
def _format_value(self, value):
"""
Format a value with packagename, if not already set
:param value:
:return:
"""
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
@DeprecationWarning
def get_element(self, tag_name, attribute, **attribute_filter):
"""
:Deprecated: use `get_attribute_value()` instead
Return element in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml:
if self.xml[i] is None:
continue
tag = self.xml[i].findall('.//' + tag_name)
if len(tag) == 0:
return None
for item in tag:
skip_this_item = False
for attr, val in list(attribute_filter.items()):
attr_val = item.get(self._ns(attr))
if attr_val != val:
skip_this_item = True
break
if skip_this_item:
continue
value = item.get(self._ns(attribute))
if value is not None:
return value
return None
def get_all_attribute_value(
    self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Return all the attribute values in xml files which match with the tag name and the specific attribute

    Yields one value per matching tag; both the plain and the
    android-namespaced spelling of the attribute are checked.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    tags = self.find_tags(tag_name, **attribute_filter)
    for tag in tags:
        # plain attribute first, then the namespace-qualified form
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is not None:
            if format_value:
                yield self._format_value(value)
            else:
                yield value
def get_attribute_value(
self, tag_name, attribute, format_value=False, **attribute_filter
):
"""
Return the attribute value in xml files which matches the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:param format_value: specify if the value needs to be formatted with packagename
:type format_value: boolean
"""
for value in self.get_all_attribute_value(
tag_name, attribute, format_value, **attribute_filter):
if value is not None:
return value
def get_value_from_tag(self, tag, attribute):
    """
    Return the value of the attribute in a specific tag

    The android-namespaced attribute is tried first; a plain,
    un-namespaced spelling is used as fallback.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    value = tag.get(self._ns(attribute))
    if value is None:
        log.warning("Failed to get the attribute with namespace")
        value = tag.get(attribute)
    return value
def find_tags(self, tag_name, **attribute_filter):
"""
Return a list of all the matched tags in all available xml
:param tag: specify the tag name
:type tag: string
"""
all_tags = [
self.find_tags_from_xml(
i, tag_name, **attribute_filter
)
for i in self.xml
]
return [tag for tag_list in all_tags for tag in tag_list]
def find_tags_from_xml(
self, xml_name, tag_name, **attribute_filter
):
"""
Return a list of all the matched tags in a specific xml
:param xml_name: specify from which xml to pick the tag from
:type xml_name: string
:param tag_name: specify the tag name
:type tag_name: string
"""
xml = self.xml[xml_name]
if xml is None:
return []
if xml.tag == tag_name:
if self.is_tag_matched(
xml.tag, **attribute_filter
):
return [xml]
return []
tags = xml.findall(".//" + tag_name)
return [
tag for tag in tags if self.is_tag_matched(
tag, **attribute_filter
)
]
def is_tag_matched(self, tag, **attribute_filter):
    """
    Return true if the attributes matches in attribute filter

    With an empty filter every tag matches. Each filter key is compared
    against the namespaced attribute first, falling back to the plain
    spelling.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    if len(attribute_filter) <= 0:
        return True
    for attr, value in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        _value = tag.get(self._ns(attr))
        if _value is None:
            log.warning("Failed to get the attribute with namespace")
            _value = tag.get(attr)
        if _value != value:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml

    An activity (or activity-alias) counts as "main" when it declares
    both the MAIN action and the LAUNCHER category in its intent
    filters; x collects the former, y the latter, and the result is
    their intersection.

    :rtype: a set of str
    """
    x = set()
    y = set()
    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
            self.xml[i].findall(".//activity-alias")
        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue
            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')
            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')
    return x.intersection(y)
def get_main_activity(self):
"""
Return the name of the main activity
This value is read from the AndroidManifest.xml
:rtype: str
"""
activities = self.get_main_activities()
if len(activities) > 0:
return self._format_value(activities.pop())
return None
def get_activities(self):
"""
Return the android:name attribute of all activities
:rtype: a list of str
"""
return list(self.get_all_attribute_value("activity", "name"))
def get_services(self):
"""
Return the android:name attribute of all services
:rtype: a list of str
"""
return list(self.get_all_attribute_value("service", "name"))
def get_receivers(self):
"""
Return the android:name attribute of all receivers
:rtype: a list of string
"""
return list(self.get_all_attribute_value("receiver", "name"))
def get_providers(self):
"""
Return the android:name attribute of all providers
:rtype: a list of string
"""
return list(self.get_all_attribute_value("provider", "name"))
def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    d = {"action": [], "category": []}
    for i in self.xml:
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[i].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        # de-duplicate while preserving declaration order
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))
    # empty lists are removed entirely, so the keys are only present
    # when at least one action/category was found
    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]
    return d
def get_permissions(self):
"""
Return permissions names declared in the AndroidManifest.xml.
It is possible that permissions are returned multiple times,
as this function does not filter the permissions, i.e. it shows you
exactly what was defined in the AndroidManifest.xml.
Implied permissions, which are granted automatically, are not returned
here. Use :meth:`get_uses_implied_permission_list` if you need a list
of implied permissions.
:returns: A list of permissions
:rtype: list
"""
return self.permissions
def get_uses_implied_permission_list(self):
"""
Return all permissions implied by the target SDK or other permissions.
:rtype: list of string
"""
target_sdk_version = self.get_effective_target_sdk_version()
READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
READ_CONTACTS = 'android.permission.READ_CONTACTS'
READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'
implied = []
implied_WRITE_EXTERNAL_STORAGE = False
if target_sdk_version < 4:
if WRITE_EXTERNAL_STORAGE not in self.permissions:
implied.append([WRITE_EXTERNAL_STORAGE, None])
implied_WRITE_EXTERNAL_STORAGE = True
if READ_PHONE_STATE not in self.permissions:
implied.append([READ_PHONE_STATE, None])
if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
and READ_EXTERNAL_STORAGE not in self.permissions:
maxSdkVersion = None
for name, version in self.uses_permissions:
if name == WRITE_EXTERNAL_STORAGE:
maxSdkVersion = version
break
implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])
if target_sdk_version < 16:
if READ_CONTACTS in self.permissions \
and READ_CALL_LOG not in self.permissions:
implied.append([READ_CALL_LOG, None])
if WRITE_CONTACTS in self.permissions \
and WRITE_CALL_LOG not in self.permissions:
implied.append([WRITE_CALL_LOG, None])
return implied
def get_details_permissions(self):
"""
Return permissions with details
:rtype: dict of {permission: [protectionLevel, label, description]}
"""
l = {}
for i in self.permissions:
if i in self.permission_module:
x = self.permission_module[i]
l[i] = [x["protectionLevel"], x["label"], x["description"]]
else:
# FIXME: the permission might be signature, if it is defined by the app itself!
l[i] = ["normal", "Unknown permission from android reference",
"Unknown permission from android reference"]
return l
@DeprecationWarning
def get_requested_permissions(self):
"""
Returns all requested permissions.
It has the same result as :meth:`get_permissions` and might be removed in the future
:rtype: list of str
"""
return self.get_permissions()
def get_requested_aosp_permissions(self):
"""
Returns requested permissions declared within AOSP project.
This includes several other permissions as well, which are in the platform apps.
:rtype: list of str
"""
aosp_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm in list(self.permission_module.keys()):
aosp_permissions.append(perm)
return aosp_permissions
def get_requested_aosp_permissions_details(self):
"""
Returns requested aosp permissions with details.
:rtype: dictionary
"""
l = {}
for i in self.permissions:
try:
l[i] = self.permission_module[i]
except KeyError:
# if we have not found permission do nothing
continue
return l
def get_requested_third_party_permissions(self):
"""
Returns list of requested permissions not declared within AOSP project.
:rtype: list of strings
"""
third_party_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm not in list(self.permission_module.keys()):
third_party_permissions.append(perm)
return third_party_permissions
def get_declared_permissions(self):
"""
Returns list of the declared permissions.
:rtype: list of strings
"""
return list(self.declared_permissions.keys())
def get_declared_permissions_details(self):
"""
Returns declared permissions with the details.
:rtype: dict
"""
return self.declared_permissions
def get_max_sdk_version(self):
"""
Return the android:maxSdkVersion attribute
:rtype: string
"""
return self.get_attribute_value("uses-sdk", "maxSdkVersion")
def get_min_sdk_version(self):
"""
Return the android:minSdkVersion attribute
:rtype: string
"""
return self.get_attribute_value("uses-sdk", "minSdkVersion")
def get_target_sdk_version(self):
"""
Return the android:targetSdkVersion attribute
:rtype: string
"""
return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
"""
Return the effective targetSdkVersion, always returns int > 0.
If the targetSdkVersion is not set, it defaults to 1. This is
set based on defaults as defined in:
https://developer.android.com/guide/topics/manifest/uses-sdk-element.html
:rtype: int
"""
target_sdk_version = self.get_target_sdk_version()
if not target_sdk_version:
target_sdk_version = self.get_min_sdk_version()
try:
return int(target_sdk_version)
except (ValueError, TypeError):
return 1
def get_libraries(self):
"""
Return the android:name attributes for libraries
:rtype: list
"""
return list(self.get_all_attribute_value("uses-library", "name"))
def get_features(self):
"""
Return a list of all android:names found for the tag uses-feature
in the AndroidManifest.xml
:return: list
"""
return list(self.get_all_attribute_value("uses-feature", "name"))
def is_wearable(self):
"""
Checks if this application is build for wearables by
checking if it uses the feature 'android.hardware.type.watch'
See: https://developer.android.com/training/wearables/apps/creating.html for more information.
Not every app is setting this feature (not even the example Google provides),
so it might be wise to not 100% rely on this feature.
:return: True if wearable, False otherwise
"""
return 'android.hardware.type.watch' in self.get_features()
def is_leanback(self):
"""
Checks if this application is build for TV (Leanback support)
by checkin if it uses the feature 'android.software.leanback'
:return: True if leanback feature is used, false otherwise
"""
return 'android.software.leanback' in self.get_features()
def is_androidtv(self):
"""
Checks if this application does not require a touchscreen,
as this is the rule to get into the TV section of the Play Store
See: https://developer.android.com/training/tv/start/start.html for more information.
:return: True if 'android.hardware.touchscreen' is not required, False otherwise
"""
return self.get_attribute_value(
'uses-feature', 'name', required="false",
name="android.hardware.touchscreen"
) == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files={}):
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionnary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionnary (key:filename, value:content of the file)
"""
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
# Block one: deleted_files, or deleted_files and new_files
if deleted_files is not None:
if re.match(deleted_files, item.filename) is None:
# if the regex of deleted_files doesn't match the filename
if new_files is not False:
if item.filename in new_files:
# and if the filename is in new_files
zout.writestr(item, new_files[item.filename])
continue
# Otherwise, write the original file.
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block two: deleted_files is None, new_files is not empty
elif new_files is not False:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block three: deleted_files is None, new_files is empty.
# Just write out the default zip
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
"""
Return the parsed xml object which corresponds to the AndroidManifest.xml file
:rtype: :class:`~lxml.etree.Element`
"""
try:
return self.xml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    The parser is created lazily on first access and cached in
    ``self.arsc``; returns None when the APK ships no resources.arsc.

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
self.get_files_types()
print("FILES: ")
for i in self.get_files():
try:
print("\t", i, self._files[i], "%x" % self.files_crc32[i])
except KeyError:
print("\t", i, "%x" % self.files_crc32[i])
print("DECLARED PERMISSIONS:")
declared_permissions = self.get_declared_permissions()
for i in declared_permissions:
print("\t", i)
print("REQUESTED PERMISSIONS:")
requested_permissions = self.get_permissions()
for i in requested_permissions:
print("\t", i)
print("MAIN ACTIVITY: ", self.get_main_activity())
print("ACTIVITIES: ")
activities = self.get_activities()
for i in activities:
filters = self.get_intent_filters("activity", i)
print("\t", i, filters or "")
print("SERVICES: ")
services = self.get_services()
for i in services:
filters = self.get_intent_filters("service", i)
print("\t", i, filters or "")
print("RECEIVERS: ")
receivers = self.get_receivers()
for i in receivers:
filters = self.get_intent_filters("receiver", i)
print("\t", i, filters or "")
print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """Convenience alias for :meth:`get_app_name`."""
    return self.get_app_name()

@property
def packagename(self):
    """Convenience alias for :meth:`get_package`."""
    return self.get_package()

@property
def version_name(self):
    """Convenience alias for :meth:`get_androidversion_name`."""
    return self.get_androidversion_name()

@property
def version_code(self):
    """Convenience alias for :meth:`get_androidversion_code`."""
    return self.get_androidversion_code()

@property
def icon_info(self):
    """Convenience alias for :meth:`get_app_icon` (icon file name)."""
    return self.get_app_icon()

@property
def icon_data(self):
    """
    Raw bytes of the application icon, or None if it cannot be read.
    """
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Retry with the name re-encoded: ZIP entries without the UTF-8
            # flag are decoded as cp437, so the stored name may differ.
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            pass
    return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK._patch_magic | python | def _patch_magic(self, buffer, orig):
if ("Zip" in orig) or ('(JAR)' in orig):
val = is_android_raw(buffer)
if val == "APK":
return "Android application package file"
return orig | Overwrite some probably wrong detections by mime libraries
:param buffer: bytes of the file to detect
:param orig: guess by mime libary
:return: corrected guess | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L513-L526 | null | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
    """
    This class can access to all elements in an APK file

    example::

        APK("myfile.apk")
        APK(read("myfile.apk"), raw=True)

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param magic_file: specify the magic file (not used anymore - legacy only)
    :param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
    :param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
        Throw an exception on failure (default False)

    :type filename: string
    :type raw: boolean
    :type magic_file: string
    :type skip_analysis: boolean
    :type testzip: boolean
    """
    if magic_file:
        log.warning("You set magic_file but this parameter is actually unused. You should remove it.")

    self.filename = filename

    # Caches for parsed artifacts, filled lazily / by _apk_analysis().
    self.xml = {}
    self.axml = {}
    self.arsc = {}

    # Manifest-derived fields; remain at these defaults when skip_analysis=True.
    self.package = ""
    self.androidversion = {}
    self.permissions = []
    self.uses_permissions = []
    self.declared_permissions = {}
    self.valid_apk = False

    self._files = {}
    self.files_crc32 = {}

    if raw is True:
        self.__raw = bytearray(filename)
        self._sha256 = hashlib.sha256(self.__raw).hexdigest()
        # Set the filename to something sane
        self.filename = "raw_apk_sha256:{}".format(self._sha256)
    else:
        self.__raw = bytearray(read(filename))

    # The ZipFile works on an in-memory copy, so no file handle stays open.
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")

    if testzip:
        # Test the zipfile for integrity before continuing.
        # This process might be slow, as the whole file is read.
        # Therefore it is possible to enable it as a separate feature.
        #
        # A short benchmark showed, that testing the zip takes about 10 times longer!
        # e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
        # testzip takes 0.1s!
        ret = self.zip.testzip()
        if ret is not None:
            # we could print the filename here, but there are zip which are so broken
            # That the filename is either very very long or does not make any sense.
            # Thus we do not do it, the user might find out by using other tools.
            raise BrokenAPKError("The APK is probably broken: testzip returned an error.")

    if not skip_analysis:
        self._apk_analysis()
def _ns(self, name):
    """
    Qualify *name* with the Android XML namespace prefix.

    :param name: bare attribute name, e.g. ``"name"``
    :return: the namespaced attribute name
    """
    return "{}{}".format(NS_ANDROID, name)
def _apk_analysis(self):
    """
    Run analysis on the APK file.

    This method is usually called by __init__ except if skip_analysis is False.
    It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
    extracted from the Manifest.
    """
    i = "AndroidManifest.xml"
    try:
        manifest_data = self.zip.read(i)
    except KeyError:
        log.warning("Missing AndroidManifest.xml. Is this an APK file?")
    else:
        ap = AXMLPrinter(manifest_data)

        if not ap.is_valid():
            log.error("Error while parsing AndroidManifest.xml - is the file valid?")
            return

        self.axml[i] = ap
        self.xml[i] = self.axml[i].get_xml_obj()

        if self.axml[i].is_packed():
            log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")

        if self.xml[i] is not None:
            if self.xml[i].tag != "manifest":
                log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
                return

            # Core identity fields from the <manifest> root element.
            self.package = self.get_attribute_value("manifest", "package")
            self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
            self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
            permission = list(self.get_all_attribute_value("uses-permission", "name"))
            # De-duplicate requested permissions (ordering is not preserved).
            self.permissions = list(set(self.permissions + permission))

            # [name, maxSdkVersion-or-None] pairs for each <uses-permission>.
            for uses_permission in self.find_tags("uses-permission"):
                self.uses_permissions.append([
                    self.get_value_from_tag(uses_permission, "name"),
                    self._get_permission_maxsdk(uses_permission)
                ])

            # getting details of the declared permissions
            for d_perm_item in self.find_tags('permission'):
                d_perm_name = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "name")))
                d_perm_label = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "label")))
                d_perm_description = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "description")))
                d_perm_permissionGroup = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
                d_perm_protectionLevel = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "protectionLevel")))

                d_perm_details = {
                    "label": d_perm_label,
                    "description": d_perm_description,
                    "permissionGroup": d_perm_permissionGroup,
                    "protectionLevel": d_perm_protectionLevel,
                }
                self.declared_permissions[d_perm_name] = d_perm_details

            # Only now do we consider the APK parseable.
            self.valid_apk = True
def __getstate__(self):
"""
Function for pickling APK Objects.
We remove the zip from the Object, as it is not pickable
And it does not make any sense to pickle it anyways.
:return: the picklable APK Object without zip.
"""
# Upon pickling, we need to remove the ZipFile
x = self.__dict__
x['axml'] = str(x['axml'])
x['xml'] = str(x['xml'])
del x['zip']
return x
def __setstate__(self, state):
    """
    Load a pickled APK Object and restore the state

    We load the zip file back by reading __raw from the Object.

    :param state: pickled state
    """
    self.__dict__ = state
    # self.__raw (name-mangled to '_APK__raw') survives in the state dict,
    # so the in-memory ZipFile can be rebuilt from it.
    # NOTE(review): __getstate__ stringified self.axml / self.xml; they are
    # NOT restored to parser objects here — confirm this is intended.
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
if not string.startswith('@string/'):
return string
string_key = string[9:]
res_parser = self.get_android_resources()
if not res_parser:
return ''
string_value = ''
for package_name in res_parser.get_packages_names():
extracted_values = res_parser.get_string(package_name, string_key)
if extracted_values:
string_value = extracted_values[1]
break
return string_value
def _get_permission_maxsdk(self, item):
    """
    Return the android:maxSdkVersion of a <uses-permission> tag as an int.

    :param item: the <uses-permission> element
    :return: the version as int, or None if the attribute is missing or
        not a valid integer
    """
    value = self.get_value_from_tag(item, "maxSdkVersion")
    if value is None:
        # Attribute absent: no maxSdkVersion restriction for this permission.
        return None
    try:
        return int(value)
    except ValueError:
        # The old warning concatenated self.get_max_sdk_version() (the wrong
        # attribute) and itself raised TypeError when that was None; report
        # the offending tag value instead.
        log.warning("'%s' is not a valid value for <uses-permission> maxSdkVersion", value)
        return None
def is_valid_APK(self):
    """
    Return true if the APK is valid, false otherwise.
    An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
    This does not mean that the APK has a valid signature nor that the APK
    can be installed on an Android system.

    :rtype: boolean
    """
    # valid_apk is only set to True at the very end of _apk_analysis().
    return self.valid_apk

def get_filename(self):
    """
    Return the filename of the APK

    For raw input this is the synthetic "raw_apk_sha256:<hash>" name
    assigned in __init__.

    :rtype: :class:`str`
    """
    return self.filename
def get_app_name(self):
    """
    Return the appname of the APK

    This name is read from the AndroidManifest.xml
    using the application android:label.
    If no label exists, the android:label of the main activity is used.

    If there is also no main activity label, an empty string is returned.

    :rtype: :class:`str`
    """
    app_name = self.get_attribute_value('application', 'label')
    if app_name is None:
        # Fall back to the label of (one of) the main activities.
        activities = self.get_main_activities()
        main_activity_name = None
        if len(activities) > 0:
            main_activity_name = activities.pop()
        app_name = self.get_attribute_value(
            'activity', 'label', name=main_activity_name
        )

    if app_name is None:
        # No App name set
        # TODO return packagename instead?
        log.warning("It looks like that no app name is set for the main activity!")
        return ""

    if app_name.startswith("@"):
        # Label is a resource reference — try to resolve it.
        res_parser = self.get_android_resources()
        if not res_parser:
            # TODO: What should be the correct return value here?
            return app_name

        res_id, package = res_parser.parse_id(app_name)

        # If the package name is the same as the APK package,
        # we should be able to resolve the ID.
        if package and package != self.get_package():
            if package == 'android':
                # TODO: we can not resolve this, as we lack framework-res.apk
                # one exception would be when parsing framework-res.apk directly.
                log.warning("Resource ID with android package name encountered! "
                            "Will not resolve, framework-res.apk would be required.")
                return app_name
            else:
                # TODO should look this up, might be in the resources
                log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                return app_name

        try:
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            # Resolution failed — keep the unresolved "@..." reference.
            log.warning("Exception selecting app name: %s" % e)
    return app_name
def get_app_icon(self, max_dpi=65536):
    """
    Return the first icon file name, which density is not greater than max_dpi,
    unless exact icon resolution is set in the manifest, in which case
    return the exact file.

    This information is read from the AndroidManifest.xml

    From https://developer.android.com/guide/practices/screens_support.html
    and https://developer.android.com/ndk/reference/group___configuration.html

    * DEFAULT 0dpi
    * ldpi (low) 120dpi
    * mdpi (medium) 160dpi
    * TV 213dpi
    * hdpi (high) 240dpi
    * xhdpi (extra-high) 320dpi
    * xxhdpi (extra-extra-high) 480dpi
    * xxxhdpi (extra-extra-extra-high) 640dpi
    * anydpi 65534dpi (0xFFFE)
    * nodpi 65535dpi (0xFFFF)

    There is a difference between nodpi and anydpi:
    nodpi will be used if no other density is specified. Or the density does not match.
    nodpi is the fallback for everything else. If there is a resource that matches the DPI,
    this is used.
    anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
    Therefore anydpi is usually used with vector graphics and with constraints on the API level.
    For example adaptive icons are usually marked as anydpi.

    When it comes now to selecting an icon, there is the following flow:

    1) is there an anydpi icon?
    2) is there an icon for the dpi of the device?
    3) is there a nodpi icon?
    4) (only on very old devices) is there a icon with dpi 0 (the default)

    For more information read here: https://stackoverflow.com/a/34370735/446140

    :rtype: :class:`str`
    """
    main_activity_name = self.get_main_activity()

    # Prefer the main activity's own icon over the application icon.
    app_icon = self.get_attribute_value(
        'activity', 'icon', name=main_activity_name)

    if not app_icon:
        app_icon = self.get_attribute_value('application', 'icon')

    res_parser = self.get_android_resources()
    if not res_parser:
        # Can not do anything below this point to resolve...
        return None

    # Fall back to the conventional launcher resource names.
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        # If the icon can not be found, return now
        return None

    if app_icon.startswith("@"):
        res_id = int(app_icon[1:], 16)
        candidates = res_parser.get_resolved_res_configs(res_id)

        app_icon = None
        current_dpi = -1

        try:
            # Pick the file with the highest density that is still <= max_dpi.
            for config, file_name in candidates:
                dpi = config.get_density()
                if current_dpi < dpi <= max_dpi:
                    app_icon = file_name
                    current_dpi = dpi
        except Exception as e:
            log.warning("Exception selecting app icon: %s" % e)

    return app_icon
def get_package(self):
    """
    Return the name of the package

    This information is read from the AndroidManifest.xml

    :rtype: :class:`str`
    """
    return self.package

def get_androidversion_code(self):
    """
    Return the android version code

    This information is read from the AndroidManifest.xml

    :rtype: :class:`str`
    """
    # May be None when the attribute is absent or analysis was skipped.
    return self.androidversion["Code"]

def get_androidversion_name(self):
    """
    Return the android version name

    This information is read from the AndroidManifest.xml

    :rtype: :class:`str`
    """
    return self.androidversion["Name"]

def get_files(self):
    """
    Return the file names inside the APK.

    :rtype: a list of :class:`str`
    """
    return self.zip.namelist()

def _get_file_magic_name(self, buffer):
    """
    Return the filetype guessed for a buffer

    Falls back to "Unknown" whenever a usable python-magic is not installed
    or detection fails.

    :param buffer: bytes
    :return: str of filetype
    """
    default = "Unknown"
    ftype = None

    try:
        # Magic is optional
        import magic
    except ImportError:
        return default

    try:
        # There are several implementations of magic,
        # unfortunately all called magic
        # We use this one: https://github.com/ahupp/python-magic/
        getattr(magic, "MagicException")
    except AttributeError:
        # Looks like no magic was installed
        return default

    try:
        # Only the first 1 KiB is needed for type sniffing.
        ftype = magic.from_buffer(buffer[:1024])
    except magic.MagicError as e:
        log.exception("Error getting the magic type!")
        return default

    if not ftype:
        return default
    else:
        return self._patch_magic(buffer, ftype)

@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    :return: dictionary of files and their mime type
    """
    return self.get_files_types()

def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    Also fills self.files_crc32 as a side effect on the first call.

    :rtype: a dictionnary
    """
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)

    return self._files

def get_files_crc32(self):
    """
    Calculates and returns a dictionary of filenames and CRC32

    The result is cached in self.files_crc32.

    :return: dict of filename: CRC32
    """
    if self.files_crc32 == {}:
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)

    return self.files_crc32

def get_files_information(self):
    """
    Return the files inside the APK with their associated types and crc32

    :rtype: str, str, int
    """
    for k in self.get_files():
        yield k, self.get_files_types()[k], self.get_files_crc32()[k]

def get_raw(self):
    """
    Return raw bytes of the APK

    :rtype: bytes
    """
    return self.__raw

def get_file(self, filename):
    """
    Return the raw data of the specified filename
    inside the APK

    :raises FileNotPresent: if the file is not part of the archive
    :rtype: bytes
    """
    try:
        return self.zip.read(filename)
    except KeyError:
        raise FileNotPresent(filename)

def get_dex(self):
    """
    Return the raw data of the classes dex file

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.

    :rtype: bytes
    """
    try:
        return self.get_file("classes.dex")
    except FileNotPresent:
        # NOTE(review): returns str "" instead of b"" on a missing dex —
        # inconsistent with the documented bytes return; confirm callers.
        return ""
def get_dex_names(self):
"""
Return the names of all DEX files found in the APK.
This method only accounts for "offical" dex files, i.e. all files
in the root directory of the APK named classes.dex or classes[0-9]+.dex
:rtype: a list of str
"""
dexre = re.compile("classes(\d*).dex")
return filter(lambda x: dexre.match(x), self.get_files())
def get_all_dex(self):
"""
Return the raw data of all classes dex files
:rtype: a generator of bytes
"""
for dex_name in self.get_dex_names():
yield self.get_file(dex_name)
def is_multidex(self):
"""
Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False
"""
dexre = re.compile("^classes(\d+)?.dex$")
return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1
# NOTE(review): @DeprecationWarning is not a decorator — it replaces the
# function with an exception instance, making this attribute non-callable.
# A warnings.warn wrapper is probably what was intended; confirm.
@DeprecationWarning
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    Deprecated: use `get_all_attribute_value()` instead
    Return elements in xml files which match with the tag name and the specific attribute

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    for i in self.xml:
        if self.xml[i] is None:
            continue
        for item in self.xml[i].findall('.//' + tag_name):
            if with_namespace:
                value = item.get(self._ns(attribute))
            else:
                value = item.get(attribute)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
def _format_value(self, value):
"""
Format a value with packagename, if not already set
:param value:
:return:
"""
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
# NOTE(review): @DeprecationWarning replaces the function with an exception
# instance (not callable) — same issue as get_elements; confirm intent.
@DeprecationWarning
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    :Deprecated: use `get_attribute_value()` instead
    Return element in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :rtype: string
    """
    for i in self.xml:
        if self.xml[i] is None:
            continue
        tag = self.xml[i].findall('.//' + tag_name)
        if len(tag) == 0:
            return None
        for item in tag:
            skip_this_item = False
            for attr, val in list(attribute_filter.items()):
                attr_val = item.get(self._ns(attr))
                if attr_val != val:
                    skip_this_item = True
                    break

            if skip_this_item:
                continue

            value = item.get(self._ns(attribute))

            if value is not None:
                return value
    return None
def get_all_attribute_value(
        self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Return all the attribute values in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    :return: a generator of the matching attribute values
    """
    tags = self.find_tags(tag_name, **attribute_filter)
    for tag in tags:
        # Prefer the plain attribute; fall back to the namespaced one.
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is not None:
            if format_value:
                yield self._format_value(value)
            else:
                yield value

def get_attribute_value(
        self, tag_name, attribute, format_value=False, **attribute_filter
):
    """
    Return the attribute value in xml files which matches the tag name and the specific attribute

    Returns None when no tag matches.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    for value in self.get_all_attribute_value(
            tag_name, attribute, format_value, **attribute_filter):
        if value is not None:
            return value

def get_value_from_tag(self, tag, attribute):
    """
    Return the value of the attribute in a specific tag

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    value = tag.get(self._ns(attribute))
    if value is None:
        log.warning("Failed to get the attribute with namespace")
        value = tag.get(attribute)
    return value

def find_tags(self, tag_name, **attribute_filter):
    """
    Return a list of all the matched tags in all available xml

    :param tag: specify the tag name
    :type tag: string
    """
    all_tags = [
        self.find_tags_from_xml(
            i, tag_name, **attribute_filter
        )
        for i in self.xml
    ]
    # Flatten the per-file lists into a single list of elements.
    return [tag for tag_list in all_tags for tag in tag_list]

def find_tags_from_xml(
        self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    # The root element itself may be the tag we are looking for.
    if xml.tag == tag_name:
        if self.is_tag_matched(
                xml.tag, **attribute_filter
        ):
            return [xml]
        return []
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags if self.is_tag_matched(
            tag, **attribute_filter
        )
    ]

def is_tag_matched(self, tag, **attribute_filter):
    """
    Return true if the attributes matches in attribute filter

    An empty filter matches any tag.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    if len(attribute_filter) <= 0:
        return True
    for attr, value in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        _value = tag.get(self._ns(attr))
        if _value is None:
            log.warning("Failed to get the attribute with namespace")
            _value = tag.get(attr)
        if _value != value:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml.
    An activity (or activity-alias) counts as "main" when it declares both
    the MAIN action and the LAUNCHER category and is not disabled.

    :rtype: a set of str
    """
    # x: activities with action MAIN; y: activities with category LAUNCHER.
    x = set()
    y = set()

    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
                                 self.xml[i].findall(".//activity-alias")

        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue

            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')

            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')

    return x.intersection(y)
def get_main_activity(self):
"""
Return the name of the main activity
This value is read from the AndroidManifest.xml
:rtype: str
"""
activities = self.get_main_activities()
if len(activities) > 0:
return self._format_value(activities.pop())
return None
def get_activities(self):
    """
    Return the android:name attribute of all activities

    :rtype: a list of str
    """
    return list(self.get_all_attribute_value("activity", "name"))

def get_services(self):
    """
    Return the android:name attribute of all services

    :rtype: a list of str
    """
    return list(self.get_all_attribute_value("service", "name"))

def get_receivers(self):
    """
    Return the android:name attribute of all receivers

    :rtype: a list of string
    """
    return list(self.get_all_attribute_value("receiver", "name"))

def get_providers(self):
    """
    Return the android:name attribute of all providers

    :rtype: a list of string
    """
    return list(self.get_all_attribute_value("provider", "name"))

def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
        (empty keys are removed from the result)
    """
    d = {"action": [], "category": []}

    for i in self.xml:
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[i].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))

    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]

    return d

def get_permissions(self):
    """
    Return permissions names declared in the AndroidManifest.xml.

    It is possible that permissions are returned multiple times,
    as this function does not filter the permissions, i.e. it shows you
    exactly what was defined in the AndroidManifest.xml.

    Implied permissions, which are granted automatically, are not returned
    here. Use :meth:`get_uses_implied_permission_list` if you need a list
    of implied permissions.

    :returns: A list of permissions (the internal list itself, not a copy)
    :rtype: list
    """
    return self.permissions

def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or other permissions.

    Each entry is a ``[permission_name, maxSdkVersion_or_None]`` pair.

    :rtype: list of string
    """
    target_sdk_version = self.get_effective_target_sdk_version()

    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'

    implied = []

    # Very old targets (<4) implicitly receive storage and phone state.
    implied_WRITE_EXTERNAL_STORAGE = False
    if target_sdk_version < 4:
        if WRITE_EXTERNAL_STORAGE not in self.permissions:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            implied_WRITE_EXTERNAL_STORAGE = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])

    # WRITE implies READ external storage (inherits its maxSdkVersion).
    if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
            and READ_EXTERNAL_STORAGE not in self.permissions:
        maxSdkVersion = None
        for name, version in self.uses_permissions:
            if name == WRITE_EXTERNAL_STORAGE:
                maxSdkVersion = version
                break
        implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])

    # Pre-16 targets: contacts permissions imply call-log permissions.
    if target_sdk_version < 16:
        if READ_CONTACTS in self.permissions \
                and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions \
                and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])

    return implied
def get_details_permissions(self):
"""
Return permissions with details
:rtype: dict of {permission: [protectionLevel, label, description]}
"""
l = {}
for i in self.permissions:
if i in self.permission_module:
x = self.permission_module[i]
l[i] = [x["protectionLevel"], x["label"], x["description"]]
else:
# FIXME: the permission might be signature, if it is defined by the app itself!
l[i] = ["normal", "Unknown permission from android reference",
"Unknown permission from android reference"]
return l
# NOTE(review): @DeprecationWarning replaces the function with an exception
# instance (not callable) — same issue as get_elements; confirm intent.
@DeprecationWarning
def get_requested_permissions(self):
    """
    Returns all requested permissions.

    It has the same result as :meth:`get_permissions` and might be removed in the future

    :rtype: list of str
    """
    return self.get_permissions()
def get_requested_aosp_permissions(self):
"""
Returns requested permissions declared within AOSP project.
This includes several other permissions as well, which are in the platform apps.
:rtype: list of str
"""
aosp_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm in list(self.permission_module.keys()):
aosp_permissions.append(perm)
return aosp_permissions
def get_requested_aosp_permissions_details(self):
"""
Returns requested aosp permissions with details.
:rtype: dictionary
"""
l = {}
for i in self.permissions:
try:
l[i] = self.permission_module[i]
except KeyError:
# if we have not found permission do nothing
continue
return l
def get_requested_third_party_permissions(self):
"""
Returns list of requested permissions not declared within AOSP project.
:rtype: list of strings
"""
third_party_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm not in list(self.permission_module.keys()):
third_party_permissions.append(perm)
return third_party_permissions
def get_declared_permissions(self):
"""
Returns list of the declared permissions.
:rtype: list of strings
"""
return list(self.declared_permissions.keys())
def get_declared_permissions_details(self):
"""
Returns declared permissions with the details.
:rtype: dict
"""
return self.declared_permissions
def get_max_sdk_version(self):
    """
    Return the android:maxSdkVersion attribute

    :rtype: string
    """
    # None when the uses-sdk tag or attribute is missing.
    return self.get_attribute_value("uses-sdk", "maxSdkVersion")

def get_min_sdk_version(self):
    """
    Return the android:minSdkVersion attribute

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "minSdkVersion")

def get_target_sdk_version(self):
    """
    Return the android:targetSdkVersion attribute

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "targetSdkVersion")

def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion, always returns int > 0.

    If the targetSdkVersion is not set, it defaults to 1.  This is
    set based on defaults as defined in:
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    target_sdk_version = self.get_target_sdk_version()
    if not target_sdk_version:
        # Fall back to minSdkVersion, as Android does.
        target_sdk_version = self.get_min_sdk_version()
    try:
        return int(target_sdk_version)
    except (ValueError, TypeError):
        # Missing or non-numeric value -> documented default of 1.
        return 1

def get_libraries(self):
    """
    Return the android:name attributes for libraries

    :rtype: list
    """
    return list(self.get_all_attribute_value("uses-library", "name"))

def get_features(self):
    """
    Return a list of all android:names found for the tag uses-feature
    in the AndroidManifest.xml

    :return: list
    """
    return list(self.get_all_attribute_value("uses-feature", "name"))

def is_wearable(self):
    """
    Checks if this application is build for wearables by
    checking if it uses the feature 'android.hardware.type.watch'
    See: https://developer.android.com/training/wearables/apps/creating.html for more information.

    Not every app is setting this feature (not even the example Google provides),
    so it might be wise to not 100% rely on this feature.

    :return: True if wearable, False otherwise
    """
    return 'android.hardware.type.watch' in self.get_features()

def is_leanback(self):
    """
    Checks if this application is build for TV (Leanback support)
    by checkin if it uses the feature 'android.software.leanback'

    :return: True if leanback feature is used, false otherwise
    """
    return 'android.software.leanback' in self.get_features()

def is_androidtv(self):
    """
    Checks if this application does not require a touchscreen,
    as this is the rule to get into the TV section of the Play Store
    See: https://developer.android.com/training/tv/start/start.html for more information.

    :return: True if 'android.hardware.touchscreen' is not required, False otherwise
    """
    # True only when an explicit uses-feature with required="false" for the
    # touchscreen is present; a missing tag yields None and thus False.
    return self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen"
    ) == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files={}):
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionnary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionnary (key:filename, value:content of the file)
"""
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
# Block one: deleted_files, or deleted_files and new_files
if deleted_files is not None:
if re.match(deleted_files, item.filename) is None:
# if the regex of deleted_files doesn't match the filename
if new_files is not False:
if item.filename in new_files:
# and if the filename is in new_files
zout.writestr(item, new_files[item.filename])
continue
# Otherwise, write the original file.
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block two: deleted_files is None, new_files is not empty
elif new_files is not False:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block three: deleted_files is None, new_files is empty.
# Just write out the default zip
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the AndroidManifest.xml file

    Returns None when no manifest has been parsed.

    :rtype: :class:`~lxml.etree.Element`
    """
    # Equivalent to the original try/except KeyError returning None.
    return self.xml.get("AndroidManifest.xml")
def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    The parser is created lazily on first access and cached in
    ``self.arsc``. Returns None when the APK ships no resources.arsc.

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        # Parse on first access and cache for subsequent calls.
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
    """
    Print a human-readable summary of the APK to stdout: files (with
    detected type and CRC32), declared and requested permissions, main
    activity, activities, services, receivers (each with their intent
    filters) and providers.
    """
    # Populates self._files and self.files_crc32 as a side effect.
    self.get_files_types()
    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # File type detection may have skipped this entry.
            print("\t", i, "%x" % self.files_crc32[i])
    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)
    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)
    print("MAIN ACTIVITY: ", self.get_main_activity())
    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")
    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")
    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")
    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """Convenience alias for :meth:`get_app_name`."""
    return self.get_app_name()
@property
def packagename(self):
    """Convenience alias for :meth:`get_package`."""
    return self.get_package()
@property
def version_name(self):
    """Convenience alias for :meth:`get_androidversion_name`."""
    return self.get_androidversion_name()
@property
def version_code(self):
    """Convenience alias for :meth:`get_androidversion_code`."""
    return self.get_androidversion_code()
@property
def icon_info(self):
    """Convenience alias for :meth:`get_app_icon` (the icon file name)."""
    return self.get_app_icon()
@property
def icon_data(self):
    """
    Return the raw bytes of the app icon file, or None if it can not be read.

    The cp437 retry works around zipfile's filename decoding for archives
    whose entry names are not valid UTF-8.
    """
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Retry with the cp437-decoded name (zipfile legacy encoding).
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            # Give up and return None.
            pass
    return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK.get_files_crc32 | python | def get_files_crc32(self):
if self.files_crc32 == {}:
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
return self.files_crc32 | Calculates and returns a dictionary of filenames and CRC32
:return: dict of filename: CRC32 | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L528-L539 | [
"def get_files(self):\n \"\"\"\n Return the file names inside the APK.\n\n :rtype: a list of :class:`str`\n \"\"\"\n return self.zip.namelist()\n"
] | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
    """
    This class can access to all elements in an APK file

    example::

        APK("myfile.apk")
        APK(read("myfile.apk"), raw=True)

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param magic_file: specify the magic file (not used anymore - legacy only)
    :param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
    :param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
                    Throw an exception on failure (default False)

    :type filename: string
    :type raw: boolean
    :type magic_file: string
    :type skip_analysis: boolean
    :type testzip: boolean
    """
    if magic_file:
        log.warning("You set magic_file but this parameter is actually unused. You should remove it.")
    self.filename = filename
    # Caches: parsed AXML printers, lxml trees, and the resources parser.
    self.xml = {}
    self.axml = {}
    self.arsc = {}
    # Filled by _apk_analysis() from the manifest.
    self.package = ""
    self.androidversion = {}
    self.permissions = []
    self.uses_permissions = []
    self.declared_permissions = {}
    self.valid_apk = False
    # Lazy caches for file type detection and CRC32 values.
    self._files = {}
    self.files_crc32 = {}
    if raw is True:
        self.__raw = bytearray(filename)
        self._sha256 = hashlib.sha256(self.__raw).hexdigest()
        # Set the filename to something sane
        self.filename = "raw_apk_sha256:{}".format(self._sha256)
    else:
        self.__raw = bytearray(read(filename))
    # Keep the whole APK in memory and open the zip over that buffer.
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
    if testzip:
        # Test the zipfile for integrity before continuing.
        # This process might be slow, as the whole file is read.
        # Therefore it is possible to enable it as a separate feature.
        #
        # A short benchmark showed, that testing the zip takes about 10 times longer!
        # e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
        # testzip takes 0.1s!
        ret = self.zip.testzip()
        if ret is not None:
            # we could print the filename here, but there are zip which are so broken
            # That the filename is either very very long or does not make any sense.
            # Thus we do not do it, the user might find out by using other tools.
            raise BrokenAPKError("The APK is probably broken: testzip returned an error.")
    if not skip_analysis:
        self._apk_analysis()
def _ns(self, name):
    """
    Return the attribute name prefixed with the Android XML namespace
    (module-level constant NS_ANDROID).

    :param name: plain attribute name, e.g. 'name'
    :rtype: str
    """
    return NS_ANDROID + name
def _apk_analysis(self):
    """
    Run analysis on the APK file.

    This method is usually called by __init__ except if skip_analysis is False.
    It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
    extracted from the Manifest (package, versions, permissions).
    Sets ``self.valid_apk = True`` only when parsing succeeds.
    """
    i = "AndroidManifest.xml"
    try:
        manifest_data = self.zip.read(i)
    except KeyError:
        log.warning("Missing AndroidManifest.xml. Is this an APK file?")
    else:
        ap = AXMLPrinter(manifest_data)
        if not ap.is_valid():
            log.error("Error while parsing AndroidManifest.xml - is the file valid?")
            return
        # Cache both the printer and the parsed xml tree.
        self.axml[i] = ap
        self.xml[i] = self.axml[i].get_xml_obj()
        if self.axml[i].is_packed():
            log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")
        if self.xml[i] is not None:
            if self.xml[i].tag != "manifest":
                log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
                return
            self.package = self.get_attribute_value("manifest", "package")
            self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
            self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
            permission = list(self.get_all_attribute_value("uses-permission", "name"))
            # Deduplicate while merging with anything already collected.
            self.permissions = list(set(self.permissions + permission))
            for uses_permission in self.find_tags("uses-permission"):
                self.uses_permissions.append([
                    self.get_value_from_tag(uses_permission, "name"),
                    self._get_permission_maxsdk(uses_permission)
                ])
            # getting details of the declared permissions
            for d_perm_item in self.find_tags('permission'):
                d_perm_name = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "name")))
                d_perm_label = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "label")))
                d_perm_description = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "description")))
                d_perm_permissionGroup = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
                d_perm_protectionLevel = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "protectionLevel")))
                d_perm_details = {
                    "label": d_perm_label,
                    "description": d_perm_description,
                    "permissionGroup": d_perm_permissionGroup,
                    "protectionLevel": d_perm_protectionLevel,
                }
                self.declared_permissions[d_perm_name] = d_perm_details
            self.valid_apk = True
def __getstate__(self):
    """
    Function for pickling APK Objects.

    We remove the zip from the Object, as it is not pickable
    And it does not make any sense to pickle it anyways.

    :return: the picklable APK Object without zip.
    """
    # Work on a COPY: the previous code mutated self.__dict__ directly,
    # which deleted the live object's `zip` attribute and stringified
    # its axml/xml caches as a side effect of merely pickling it.
    x = self.__dict__.copy()
    x['axml'] = str(x['axml'])
    x['xml'] = str(x['xml'])
    del x['zip']
    return x
def __setstate__(self, state):
    """
    Load a pickled APK Object and restore the state

    We load the zip file back by reading __raw from the Object.

    :param state: pickled state
    """
    self.__dict__ = state
    # Reopen the zip over the pickled raw bytes (name-mangled _APK__raw).
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
    """
    Resolve a string resource reference of the form '@string/<key>'.

    Returns the input unchanged when it is not a reference, and an empty
    string when the resources can not be parsed or the key is unknown.

    :param string: the raw attribute value from the manifest
    :rtype: str
    """
    prefix = '@string/'
    if not string.startswith(prefix):
        return string
    # The previous code sliced with string[9:], dropping the first
    # character of the key ('@string/' is only 8 characters long).
    string_key = string[len(prefix):]
    res_parser = self.get_android_resources()
    if not res_parser:
        return ''
    string_value = ''
    for package_name in res_parser.get_packages_names():
        extracted_values = res_parser.get_string(package_name, string_key)
        if extracted_values:
            # get_string returns a pair; index 1 is the resolved text.
            string_value = extracted_values[1]
            break
    return string_value
def _get_permission_maxsdk(self, item):
    """
    Return the android:maxSdkVersion of a <uses-permission> tag as int,
    or None when the attribute is missing or not a number.

    :param item: the <uses-permission> element
    """
    sdk_version = self.get_value_from_tag(item, "maxSdkVersion")
    try:
        return int(sdk_version)
    except ValueError:
        # Log the offending value itself; the previous code logged
        # get_max_sdk_version() instead, which is unrelated to this tag
        # and could itself be None (raising TypeError inside the handler).
        log.warning("%s is not a valid value for <uses-permission> maxSdkVersion", sdk_version)
    except TypeError:
        # Attribute not present at all (int(None)).
        pass
    return None
def is_valid_APK(self):
    """
    Return true if the APK is valid, false otherwise.

    An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
    This does not mean that the APK has a valid signature nor that the APK
    can be installed on an Android system.

    :rtype: boolean
    """
    # Flag is set at the end of _apk_analysis() on successful parsing.
    return self.valid_apk
def get_filename(self):
    """
    Return the filename of the APK (or the synthetic
    "raw_apk_sha256:..." name when constructed from raw bytes).

    :rtype: :class:`str`
    """
    return self.filename
def get_app_name(self):
    """
    Return the appname of the APK

    This name is read from the AndroidManifest.xml
    using the application android:label.
    If no label exists, the android:label of the main activity is used.

    If there is also no main activity label, an empty string is returned.

    :rtype: :class:`str`
    """
    app_name = self.get_attribute_value('application', 'label')
    if app_name is None:
        # Fall back to the label of (one of) the main activities.
        activities = self.get_main_activities()
        main_activity_name = None
        if len(activities) > 0:
            main_activity_name = activities.pop()
        app_name = self.get_attribute_value(
            'activity', 'label', name=main_activity_name
        )
    if app_name is None:
        # No App name set
        # TODO return packagename instead?
        log.warning("It looks like that no app name is set for the main activity!")
        return ""
    if app_name.startswith("@"):
        # The label is a resource reference; try to resolve it.
        res_parser = self.get_android_resources()
        if not res_parser:
            # TODO: What should be the correct return value here?
            return app_name
        res_id, package = res_parser.parse_id(app_name)
        # If the package name is the same as the APK package,
        # we should be able to resolve the ID.
        if package and package != self.get_package():
            if package == 'android':
                # TODO: we can not resolve this, as we lack framework-res.apk
                # one exception would be when parsing framework-res.apk directly.
                log.warning("Resource ID with android package name encountered! "
                            "Will not resolve, framework-res.apk would be required.")
                return app_name
            else:
                # TODO should look this up, might be in the resources
                log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                return app_name
        try:
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            log.warning("Exception selecting app name: %s" % e)
    return app_name
def get_app_icon(self, max_dpi=65536):
    """
    Return the first icon file name, which density is not greater than max_dpi,
    unless exact icon resolution is set in the manifest, in which case
    return the exact file.

    This information is read from the AndroidManifest.xml

    From https://developer.android.com/guide/practices/screens_support.html
    and https://developer.android.com/ndk/reference/group___configuration.html

    * DEFAULT                             0dpi
    * ldpi (low)                        120dpi
    * mdpi (medium)                     160dpi
    * TV                                213dpi
    * hdpi (high)                       240dpi
    * xhdpi (extra-high)                320dpi
    * xxhdpi (extra-extra-high)         480dpi
    * xxxhdpi (extra-extra-extra-high)  640dpi
    * anydpi                          65534dpi (0xFFFE)
    * nodpi                           65535dpi (0xFFFF)

    There is a difference between nodpi and anydpi:
    nodpi will be used if no other density is specified. Or the density does not match.
    nodpi is the fallback for everything else. If there is a resource that matches the DPI,
    this is used.
    anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
    Therefore anydpi is usually used with vector graphics and with constraints on the API level.
    For example adaptive icons are usually marked as anydpi.

    When it comes now to selecting an icon, there is the following flow:

    1) is there an anydpi icon?
    2) is there an icon for the dpi of the device?
    3) is there a nodpi icon?
    4) (only on very old devices) is there a icon with dpi 0 (the default)

    For more information read here: https://stackoverflow.com/a/34370735/446140

    :param max_dpi: only accept icons whose density is <= this value
    :rtype: :class:`str`
    """
    # Prefer the main activity's own icon over the application icon.
    main_activity_name = self.get_main_activity()
    app_icon = self.get_attribute_value(
        'activity', 'icon', name=main_activity_name)
    if not app_icon:
        app_icon = self.get_attribute_value('application', 'icon')
    res_parser = self.get_android_resources()
    if not res_parser:
        # Can not do anything below this point to resolve...
        return None
    # Fall back to the conventional launcher resource names.
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id
    if not app_icon:
        # If the icon can not be found, return now
        return None
    if app_icon.startswith("@"):
        # Resource reference: pick the candidate with the highest
        # density that does not exceed max_dpi.
        res_id = int(app_icon[1:], 16)
        candidates = res_parser.get_resolved_res_configs(res_id)
        app_icon = None
        current_dpi = -1
        try:
            for config, file_name in candidates:
                dpi = config.get_density()
                if current_dpi < dpi <= max_dpi:
                    app_icon = file_name
                    current_dpi = dpi
        except Exception as e:
            log.warning("Exception selecting app icon: %s" % e)
    return app_icon
def get_package(self):
    """
    Return the name of the package

    This information is read from the AndroidManifest.xml
    (set by _apk_analysis from the manifest's `package` attribute).

    :rtype: :class:`str`
    """
    return self.package
def get_androidversion_code(self):
    """
    Return the android version code

    This information is read from the AndroidManifest.xml.
    Raises KeyError if the manifest could not be parsed
    (the key is only set by _apk_analysis).

    :rtype: :class:`str`
    """
    return self.androidversion["Code"]
def get_androidversion_name(self):
    """
    Return the android version name

    This information is read from the AndroidManifest.xml.
    Raises KeyError if the manifest could not be parsed
    (the key is only set by _apk_analysis).

    :rtype: :class:`str`
    """
    return self.androidversion["Name"]
def get_files(self):
    """
    Return the file names inside the APK (straight from the zip index).

    :rtype: a list of :class:`str`
    """
    return self.zip.namelist()
def _get_file_magic_name(self, buffer):
    """
    Return the filetype guessed for a buffer, or "Unknown" when the
    optional `magic` dependency is missing or fails.

    :param buffer: bytes
    :return: str of filetype
    """
    default = "Unknown"
    ftype = None
    try:
        # Magic is optional
        import magic
    except ImportError:
        return default
    try:
        # There are several implementations of magic,
        # unfortunately all called magic
        # We use this one: https://github.com/ahupp/python-magic/
        getattr(magic, "MagicException")
    except AttributeError:
        # Looks like no magic was installed
        return default
    try:
        # Only the first KiB is needed for type detection.
        ftype = magic.from_buffer(buffer[:1024])
    except magic.MagicError as e:
        log.exception("Error getting the magic type!")
        return default
    if not ftype:
        return default
    else:
        # Correct some known misdetections (e.g. nested APKs).
        return self._patch_magic(buffer, ftype)
@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    :return: dictionary of files and their mime type
    """
    return self.get_files_types()
def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    The result is computed once and cached in ``self._files``; the CRC32
    cache ``self.files_crc32`` is filled as a side effect.

    :rtype: a dictionnary
    """
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)
    return self._files
def _patch_magic(self, buffer, orig):
    """
    Overwrite some probably wrong detections by mime libraries

    :param buffer: bytes of the file to detect
    :param orig: guess by mime libary
    :return: corrected guess
    """
    # Zip/JAR guesses may actually be nested Android packages.
    if ("Zip" in orig) or ('(JAR)' in orig):
        val = is_android_raw(buffer)
        if val == "APK":
            return "Android application package file"
    return orig
def get_files_information(self):
    """
    Return the files inside the APK with their associated types and crc32

    Yields one (filename, type, crc32) tuple per file.

    :rtype: str, str, int
    """
    # Hoist the (cached) dictionary lookups out of the loop instead of
    # re-calling the accessor methods on every iteration.
    types = self.get_files_types()
    crcs = self.get_files_crc32()
    for k in self.get_files():
        yield k, types[k], crcs[k]
def get_raw(self):
    """
    Return raw bytes of the APK (the buffer the zip was opened over).

    :rtype: bytes
    """
    return self.__raw
def get_file(self, filename):
    """
    Return the raw data of the specified filename
    inside the APK

    :raises FileNotPresent: if the entry does not exist in the zip
    :rtype: bytes
    """
    try:
        return self.zip.read(filename)
    except KeyError:
        # Translate zipfile's KeyError into the package's own exception.
        raise FileNotPresent(filename)
def get_dex(self):
    """
    Return the raw data of the classes dex file

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.

    :rtype: bytes
    """
    try:
        return self.get_file("classes.dex")
    except FileNotPresent:
        # NOTE(review): returns the str "" rather than b"" on a missing
        # dex, despite the :rtype: bytes — kept for backward compatibility.
        return ""
def get_dex_names(self):
    """
    Return the names of all DEX files found in the APK.

    This method only accounts for "official" dex files, i.e. all files
    in the root directory of the APK named classes.dex or classes[0-9]+.dex

    :rtype: a list of str
    """
    # Raw string, escaped dot and anchors: the old pattern
    # "classes(\d*).dex" also matched names such as "classes2.dexx"
    # (no end anchor) or "classesXdex" (unescaped dot).
    dexre = re.compile(r"^classes(\d*)\.dex$")
    # Return a real (re-iterable) list instead of a one-shot filter object.
    return [name for name in self.get_files() if dexre.match(name)]
def get_all_dex(self):
    """
    Return the raw data of all classes dex files
    (one bytes object per name from get_dex_names()).

    :rtype: a generator of bytes
    """
    for dex_name in self.get_dex_names():
        yield self.get_file(dex_name)
def is_multidex(self):
    """
    Test if the APK has multiple DEX files

    :return: True if multiple dex found, otherwise False
    """
    # Raw string and escaped dot: the old pattern "^classes(\d+)?.dex$"
    # also matched names such as "classesXdex" because "." matched any
    # character.
    dexre = re.compile(r"^classes(\d+)?\.dex$")
    return sum(1 for name in self.get_files() if dexre.search(name)) > 1
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    .. deprecated:: use :meth:`get_all_attribute_value` instead

    Return elements in xml files which match with the tag name and the specific attribute

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    # The previous @DeprecationWarning decorator replaced the method with
    # a DeprecationWarning *instance*, making it impossible to call.
    # Emit a proper runtime warning instead (fires on first iteration,
    # since this is a generator).
    import warnings
    warnings.warn(
        "get_elements() is deprecated, use get_all_attribute_value() instead",
        DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        for item in self.xml[i].findall('.//' + tag_name):
            if with_namespace:
                value = item.get(self._ns(attribute))
            else:
                value = item.get(attribute)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
def _format_value(self, value):
    """
    Format a value with packagename, if not already set.

    '.Name'  -> '<package>.Name'
    'Name'   -> '<package>.Name' (no dot anywhere in the value)
    'a.b.C'  -> unchanged; empty string -> unchanged

    :param value: the raw value from the manifest
    :return: the fully qualified value
    """
    # The old `v_dot == 0` branch was unreachable: a leading dot is
    # already handled by the startswith check, so find(".") can never
    # return 0 afterwards.
    if not value:
        return value
    if value.startswith("."):
        return self.package + value
    if "." not in value:
        return self.package + "." + value
    return value
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    .. deprecated:: use :meth:`get_attribute_value` instead

    Return element in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :rtype: string
    """
    # The previous @DeprecationWarning decorator replaced the method with
    # a DeprecationWarning *instance*, making it impossible to call.
    import warnings
    warnings.warn(
        "get_element() is deprecated, use get_attribute_value() instead",
        DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        tag = self.xml[i].findall('.//' + tag_name)
        if len(tag) == 0:
            # NOTE(review): returns on the first xml file lacking the tag
            # instead of trying the remaining files — kept as-is.
            return None
        for item in tag:
            skip_this_item = False
            for attr, val in list(attribute_filter.items()):
                attr_val = item.get(self._ns(attr))
                if attr_val != val:
                    skip_this_item = True
                    break
            if skip_this_item:
                continue
            value = item.get(self._ns(attribute))
            if value is not None:
                return value
    return None
def get_all_attribute_value(
    self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Return all the attribute values in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    :param attribute_filter: extra attribute name/value pairs the tag must match
    """
    tags = self.find_tags(tag_name, **attribute_filter)
    for tag in tags:
        # Prefer the plain attribute, fall back to the namespaced one.
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is not None:
            if format_value:
                yield self._format_value(value)
            else:
                yield value
def get_attribute_value(
    self, tag_name, attribute, format_value=False, **attribute_filter
):
    """
    Return the first matching attribute value in xml files which matches
    the tag name and the specific attribute, or None.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
        (NOTE: defaults to False here but True in get_all_attribute_value —
        existing callers depend on this asymmetry)
    :type format_value: boolean
    """
    for value in self.get_all_attribute_value(
            tag_name, attribute, format_value, **attribute_filter):
        if value is not None:
            return value
def get_value_from_tag(self, tag, attribute):
    """
    Return the value of the attribute in a specific tag, trying the
    Android-namespaced name first and falling back to the plain name.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    value = tag.get(self._ns(attribute))
    if value is not None:
        return value
    # Namespaced lookup failed; warn and retry with the bare name.
    log.warning("Failed to get the attribute with namespace")
    return tag.get(attribute)
def find_tags(self, tag_name, **attribute_filter):
    """
    Return a flat list of all the matched tags across every parsed xml.

    :param tag_name: specify the tag name
    :type tag_name: string
    """
    # Flattened double comprehension instead of building per-file lists
    # and then flattening them in a second pass.
    return [
        tag
        for xml_name in self.xml
        for tag in self.find_tags_from_xml(xml_name, tag_name, **attribute_filter)
    ]
def find_tags_from_xml(
    self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    if xml.tag == tag_name:
        # Pass the element itself: the previous code passed xml.tag (a
        # str), which made is_tag_matched crash with AttributeError on
        # any non-empty attribute_filter (str has no .get()).
        if self.is_tag_matched(xml, **attribute_filter):
            return [xml]
        return []
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags if self.is_tag_matched(
            tag, **attribute_filter
        )
    ]
def is_tag_matched(self, tag, **attribute_filter):
    """
    Return True if the tag carries every attribute/value pair in
    attribute_filter (an empty filter always matches).

    :param tag: specify the tag element
    :type tag: Element
    """
    if not attribute_filter:
        return True
    for attr, expected in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        actual = tag.get(self._ns(attr))
        if actual is None:
            log.warning("Failed to get the attribute with namespace")
            actual = tag.get(attr)
        if actual != expected:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml: an activity (or
    activity-alias) counts as "main" when it declares BOTH the MAIN
    action and the LAUNCHER category in its intent filters.

    :rtype: a set of str
    """
    x = set()  # activities with action MAIN
    y = set()  # activities with category LAUNCHER
    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
                                 self.xml[i].findall(".//activity-alias")
        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue
            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')
            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')
    # Main == declares both MAIN action and LAUNCHER category.
    return x.intersection(y)
def get_main_activity(self):
    """
    Return the name of the main activity, fully qualified with the
    package name, or None when there is none.

    This value is read from the AndroidManifest.xml

    :rtype: str
    """
    activities = self.get_main_activities()
    if len(activities) > 0:
        # Arbitrary pick when multiple main activities exist (set.pop).
        return self._format_value(activities.pop())
    return None
def get_activities(self):
    """
    Return the android:name attribute of all activities
    declared in the manifest.

    :rtype: a list of str
    """
    return list(self.get_all_attribute_value("activity", "name"))
def get_services(self):
    """
    Return the android:name attribute of all services
    declared in the manifest.

    :rtype: a list of str
    """
    return list(self.get_all_attribute_value("service", "name"))
def get_receivers(self):
    """
    Return the android:name attribute of all receivers
    declared in the manifest.

    :rtype: a list of string
    """
    return list(self.get_all_attribute_value("receiver", "name"))
def get_providers(self):
    """
    Return the android:name attribute of all providers
    declared in the manifest.

    :rtype: a list of string
    """
    return list(self.get_all_attribute_value("provider", "name"))
def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    d = {"action": [], "category": []}
    for i in self.xml:
        # Guard against unparsable manifests: self.xml[i] may be None
        # (every other loop in this class has this check; this one
        # previously crashed with AttributeError).
        if self.xml[i] is None:
            continue
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[i].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))
    # Drop empty keys so callers get {} when nothing was found.
    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]
    return d
def get_permissions(self):
    """
    Return permissions names declared in the AndroidManifest.xml.

    It is possible that permissions are returned multiple times,
    as this function does not filter the permissions, i.e. it shows you
    exactly what was defined in the AndroidManifest.xml.

    Implied permissions, which are granted automatically, are not returned
    here. Use :meth:`get_uses_implied_permission_list` if you need a list
    of implied permissions.

    :returns: A list of permissions
    :rtype: list
    """
    # Populated by _apk_analysis() from <uses-permission> tags.
    return self.permissions
def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or other permissions.

    Each entry is a [permission_name, maxSdkVersion_or_None] pair, per
    the implied-permission rules from the Android uses-sdk documentation.

    :rtype: list of string
    """
    target_sdk_version = self.get_effective_target_sdk_version()
    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'
    implied = []
    implied_WRITE_EXTERNAL_STORAGE = False
    # Pre-API-4 apps implicitly get storage and phone-state access.
    if target_sdk_version < 4:
        if WRITE_EXTERNAL_STORAGE not in self.permissions:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            implied_WRITE_EXTERNAL_STORAGE = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])
    # WRITE implies READ external storage (inheriting its maxSdkVersion).
    if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
            and READ_EXTERNAL_STORAGE not in self.permissions:
        maxSdkVersion = None
        for name, version in self.uses_permissions:
            if name == WRITE_EXTERNAL_STORAGE:
                maxSdkVersion = version
                break
        implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])
    # Pre-API-16, contacts permissions imply the call-log ones.
    if target_sdk_version < 16:
        if READ_CONTACTS in self.permissions \
                and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions \
                and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])
    return implied
def get_details_permissions(self):
    """
    Return permissions with details.

    Unknown permissions get a placeholder entry with level "normal".

    :rtype: dict of {permission: [protectionLevel, label, description]}
    """
    # NOTE(review): self.permission_module is not set in __init__ —
    # presumably a class-level attribute defined elsewhere; confirm.
    l = {}
    for i in self.permissions:
        if i in self.permission_module:
            x = self.permission_module[i]
            l[i] = [x["protectionLevel"], x["label"], x["description"]]
        else:
            # FIXME: the permission might be signature, if it is defined by the app itself!
            l[i] = ["normal", "Unknown permission from android reference",
                    "Unknown permission from android reference"]
    return l
def get_requested_permissions(self):
    """
    .. deprecated:: use :meth:`get_permissions` instead

    Returns all requested permissions.

    It has the same result as :meth:`get_permissions` and might be removed in the future

    :rtype: list of str
    """
    # The previous @DeprecationWarning decorator replaced the method with
    # a DeprecationWarning *instance*, making it impossible to call.
    import warnings
    warnings.warn(
        "get_requested_permissions() is deprecated, use get_permissions() instead",
        DeprecationWarning, stacklevel=2)
    return self.get_permissions()
def get_requested_aosp_permissions(self):
    """
    Returns requested permissions declared within AOSP project.

    This includes several other permissions as well, which are in the platform apps.

    :rtype: list of str
    """
    # Membership test directly against the dict is equivalent to the
    # old `in list(keys())` check.
    return [
        perm
        for perm in self.get_permissions()
        if perm in self.permission_module
    ]
def get_requested_aosp_permissions_details(self):
    """
    Returns requested aosp permissions with details.

    Permissions unknown to the AOSP table are silently skipped.

    :rtype: dictionary
    """
    # Dict comprehension with a membership test replaces the old
    # try/except KeyError loop — same result.
    return {
        perm: self.permission_module[perm]
        for perm in self.permissions
        if perm in self.permission_module
    }
def get_requested_third_party_permissions(self):
    """
    Returns list of requested permissions not declared within AOSP project.

    :rtype: list of strings
    """
    # Complement of get_requested_aosp_permissions, as a comprehension.
    return [
        perm
        for perm in self.get_permissions()
        if perm not in self.permission_module
    ]
def get_declared_permissions(self):
    """
    Returns list of the declared permissions
    (names of <permission> tags collected by _apk_analysis).

    :rtype: list of strings
    """
    return list(self.declared_permissions.keys())
def get_declared_permissions_details(self):
    """
    Returns declared permissions with the details
    (label, description, permissionGroup, protectionLevel per name).

    :rtype: dict
    """
    return self.declared_permissions
def get_max_sdk_version(self):
    """
    Return the android:maxSdkVersion attribute, or None if absent.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "maxSdkVersion")
def get_min_sdk_version(self):
    """
    Return the android:minSdkVersion attribute, or None if absent.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "minSdkVersion")
def get_target_sdk_version(self):
    """
    Return the android:targetSdkVersion attribute, or None if absent.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion, always returns int > 0.

    If the targetSdkVersion is not set, it defaults to 1.  This is
    set based on defaults as defined in:
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    # targetSdkVersion falls back to minSdkVersion; any non-numeric or
    # missing value defaults to 1.
    version = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(version)
    except (ValueError, TypeError):
        return 1
def get_libraries(self):
    """
    Return the android:name attributes for libraries
    (<uses-library> tags in the manifest).

    :rtype: list
    """
    return list(self.get_all_attribute_value("uses-library", "name"))
def get_features(self):
    """
    Return a list of all android:names found for the tag uses-feature
    in the AndroidManifest.xml

    :return: list
    """
    return list(self.get_all_attribute_value("uses-feature", "name"))
def is_wearable(self):
    """
    Checks if this application is build for wearables by
    checking if it uses the feature 'android.hardware.type.watch'
    See: https://developer.android.com/training/wearables/apps/creating.html for more information.

    Not every app is setting this feature (not even the example Google provides),
    so it might be wise to not 100% rely on this feature.

    :return: True if wearable, False otherwise
    """
    return 'android.hardware.type.watch' in self.get_features()
def is_leanback(self):
    """
    Checks if this application is build for TV (Leanback support)
    by checkin if it uses the feature 'android.software.leanback'

    :return: True if leanback feature is used, false otherwise
    """
    return 'android.software.leanback' in self.get_features()
def is_androidtv(self):
    """Check whether this application does not require a touchscreen,
    which is the rule for entering the TV section of the Play Store.

    See https://developer.android.com/training/tv/start/start.html for
    more information.

    :return: True if 'android.hardware.touchscreen' is declared with
        required="false", False otherwise
    """
    touchscreen = self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen")
    return touchscreen == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files=None):
    """
    Create a new zip file from the entries of the current APK.

    Entries whose name matches ``deleted_files`` are dropped; entries
    listed in ``new_files`` have their content replaced; everything
    else is copied over unchanged.

    :param filename: the output filename (or file-like object) of the zip
    :param deleted_files: a regex pattern to remove specific files
    :param new_files: a dictionary of new files
    :type filename: string
    :type deleted_files: None or a string
    :type new_files: a dictionary (key: filename, value: content of the file)
    """
    # Avoid the mutable-default-argument pitfall of the previous
    # signature (new_files={}); also normalize the legacy falsy value
    # False to an empty mapping so the lookup below is uniform.
    if not new_files:
        new_files = {}
    # 'with' guarantees the archive is finalized even if a read fails.
    with zipfile.ZipFile(filename, 'w') as zout:
        for item in self.zip.infolist():
            # Deletion wins: a matching name is dropped even if it also
            # appears in new_files (matches the original behavior).
            if deleted_files is not None and re.match(deleted_files, item.filename) is not None:
                continue
            if item.filename in new_files:
                # Replace the entry's content with caller-supplied data.
                zout.writestr(item, new_files[item.filename])
            else:
                # Copy the original entry unchanged.
                zout.writestr(item, self.zip.read(item.filename))
def get_android_manifest_axml(self):
    """Return the :class:`AXMLPrinter` object corresponding to the
    AndroidManifest.xml file, or None if the manifest was not parsed.

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
    """
    # dict.get returns None on a missing key, replacing try/KeyError.
    return self.axml.get("AndroidManifest.xml")
def get_android_manifest_xml(self):
    """Return the parsed xml object corresponding to the
    AndroidManifest.xml file, or None if the manifest was not parsed.

    :rtype: :class:`~lxml.etree.Element`
    """
    # dict.get returns None on a missing key, replacing try/KeyError.
    return self.xml.get("AndroidManifest.xml")
def get_android_resources(self):
    """Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object corresponding to the resources.arsc file.

    The parser is created lazily on first access and cached in
    ``self.arsc``. Returns None when the APK ships no resources.arsc
    (a rare but possible case, e.g. manually assembled APKs).

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    key = "resources.arsc"
    if key in self.arsc:
        # Already parsed: serve from cache.
        return self.arsc[key]
    if key not in self.zip.namelist():
        # No resource table at all in this APK.
        return None
    self.arsc[key] = ARSCParser(self.zip.read(key))
    return self.arsc[key]
def show(self):
    """Print a human-readable summary of the APK to stdout: files with
    their detected type and CRC32, declared and requested permissions,
    the main activity, and all activities/services/receivers/providers
    with their intent filters."""
    # Populate self._files (file -> magic type) as a side effect.
    self.get_files_types()
    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # No magic type known for this file; print name and CRC only.
            print("\t", i, "%x" % self.files_crc32[i])
    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)
    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)
    print("MAIN ACTIVITY: ", self.get_main_activity())
    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        # Show the action/category intent filters attached to each item.
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")
    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")
    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")
    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """The application label (see :meth:`get_app_name`)."""
    return self.get_app_name()
@property
def packagename(self):
    """The package name from the manifest (see :meth:`get_package`)."""
    return self.get_package()
@property
def version_name(self):
    """The android:versionName attribute (see :meth:`get_androidversion_name`)."""
    return self.get_androidversion_name()
@property
def version_code(self):
    """The android:versionCode attribute (see :meth:`get_androidversion_code`)."""
    return self.get_androidversion_code()
@property
def icon_info(self):
    """The resolved icon file name (see :meth:`get_app_icon`)."""
    return self.get_app_icon()
@property
def icon_data(self):
    """Raw bytes of the application icon, or None when the icon file
    cannot be located inside the APK."""
    icon_path = self.get_app_icon()
    try:
        return self.get_file(icon_path)
    except FileNotPresent:
        pass
    try:
        # Retry with the path re-encoded via cp437 (the historical ZIP
        # filename codepage) before giving up.
        return self.get_file(icon_path.encode().decode('cp437'))
    except FileNotPresent:
        return None
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK.is_multidex | python | def is_multidex(self):
dexre = re.compile("^classes(\d+)?.dex$")
return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1 | Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L604-L611 | [
"def get_files(self):\n \"\"\"\n Return the file names inside the APK.\n\n :rtype: a list of :class:`str`\n \"\"\"\n return self.zip.namelist()\n"
] | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
"""
This class can access to all elements in an APK file
example::
APK("myfile.apk")
APK(read("myfile.apk"), raw=True)
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param magic_file: specify the magic file (not used anymore - legacy only)
:param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
:param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
Throw an exception on failure (default False)
:type filename: string
:type raw: boolean
:type magic_file: string
:type skip_analysis: boolean
:type testzip: boolean
"""
if magic_file:
log.warning("You set magic_file but this parameter is actually unused. You should remove it.")
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.uses_permissions = []
self.declared_permissions = {}
self.valid_apk = False
self._files = {}
self.files_crc32 = {}
if raw is True:
self.__raw = bytearray(filename)
self._sha256 = hashlib.sha256(self.__raw).hexdigest()
# Set the filename to something sane
self.filename = "raw_apk_sha256:{}".format(self._sha256)
else:
self.__raw = bytearray(read(filename))
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
if testzip:
# Test the zipfile for integrity before continuing.
# This process might be slow, as the whole file is read.
# Therefore it is possible to enable it as a separate feature.
#
# A short benchmark showed, that testing the zip takes about 10 times longer!
# e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
# testzip takes 0.1s!
ret = self.zip.testzip()
if ret is not None:
# we could print the filename here, but there are zip which are so broken
# That the filename is either very very long or does not make any sense.
# Thus we do not do it, the user might find out by using other tools.
raise BrokenAPKError("The APK is probably broken: testzip returned an error.")
if not skip_analysis:
self._apk_analysis()
def _ns(self, name):
"""
return the name including the Android namespace
"""
return NS_ANDROID + name
def _apk_analysis(self):
"""
Run analysis on the APK file.
This method is usually called by __init__ except if skip_analysis is False.
It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
extracted from the Manifest.
"""
i = "AndroidManifest.xml"
try:
manifest_data = self.zip.read(i)
except KeyError:
log.warning("Missing AndroidManifest.xml. Is this an APK file?")
else:
ap = AXMLPrinter(manifest_data)
if not ap.is_valid():
log.error("Error while parsing AndroidManifest.xml - is the file valid?")
return
self.axml[i] = ap
self.xml[i] = self.axml[i].get_xml_obj()
if self.axml[i].is_packed():
log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")
if self.xml[i] is not None:
if self.xml[i].tag != "manifest":
log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
return
self.package = self.get_attribute_value("manifest", "package")
self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
permission = list(self.get_all_attribute_value("uses-permission", "name"))
self.permissions = list(set(self.permissions + permission))
for uses_permission in self.find_tags("uses-permission"):
self.uses_permissions.append([
self.get_value_from_tag(uses_permission, "name"),
self._get_permission_maxsdk(uses_permission)
])
# getting details of the declared permissions
for d_perm_item in self.find_tags('permission'):
d_perm_name = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "name")))
d_perm_label = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "label")))
d_perm_description = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "description")))
d_perm_permissionGroup = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
d_perm_protectionLevel = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "protectionLevel")))
d_perm_details = {
"label": d_perm_label,
"description": d_perm_description,
"permissionGroup": d_perm_permissionGroup,
"protectionLevel": d_perm_protectionLevel,
}
self.declared_permissions[d_perm_name] = d_perm_details
self.valid_apk = True
def __getstate__(self):
"""
Function for pickling APK Objects.
We remove the zip from the Object, as it is not pickable
And it does not make any sense to pickle it anyways.
:return: the picklable APK Object without zip.
"""
# Upon pickling, we need to remove the ZipFile
x = self.__dict__
x['axml'] = str(x['axml'])
x['xml'] = str(x['xml'])
del x['zip']
return x
def __setstate__(self, state):
"""
Load a pickled APK Object and restore the state
We load the zip file back by reading __raw from the Object.
:param state: pickled state
"""
self.__dict__ = state
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
if not string.startswith('@string/'):
return string
string_key = string[9:]
res_parser = self.get_android_resources()
if not res_parser:
return ''
string_value = ''
for package_name in res_parser.get_packages_names():
extracted_values = res_parser.get_string(package_name, string_key)
if extracted_values:
string_value = extracted_values[1]
break
return string_value
def _get_permission_maxsdk(self, item):
maxSdkVersion = None
try:
maxSdkVersion = int(self.get_value_from_tag(item, "maxSdkVersion"))
except ValueError:
log.warning(self.get_max_sdk_version() + 'is not a valid value for <uses-permission> maxSdkVersion')
except TypeError:
pass
return maxSdkVersion
def is_valid_APK(self):
"""
Return true if the APK is valid, false otherwise.
An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
This does not mean that the APK has a valid signature nor that the APK
can be installed on an Android system.
:rtype: boolean
"""
return self.valid_apk
def get_filename(self):
"""
Return the filename of the APK
:rtype: :class:`str`
"""
return self.filename
def get_app_name(self):
"""
Return the appname of the APK
This name is read from the AndroidManifest.xml
using the application android:label.
If no label exists, the android:label of the main activity is used.
If there is also no main activity label, an empty string is returned.
:rtype: :class:`str`
"""
app_name = self.get_attribute_value('application', 'label')
if app_name is None:
activities = self.get_main_activities()
main_activity_name = None
if len(activities) > 0:
main_activity_name = activities.pop()
app_name = self.get_attribute_value(
'activity', 'label', name=main_activity_name
)
if app_name is None:
# No App name set
# TODO return packagename instead?
log.warning("It looks like that no app name is set for the main activity!")
return ""
if app_name.startswith("@"):
res_parser = self.get_android_resources()
if not res_parser:
# TODO: What should be the correct return value here?
return app_name
res_id, package = res_parser.parse_id(app_name)
# If the package name is the same as the APK package,
# we should be able to resolve the ID.
if package and package != self.get_package():
if package == 'android':
# TODO: we can not resolve this, as we lack framework-res.apk
# one exception would be when parsing framework-res.apk directly.
log.warning("Resource ID with android package name encountered! "
"Will not resolve, framework-res.apk would be required.")
return app_name
else:
# TODO should look this up, might be in the resources
log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
return app_name
try:
app_name = res_parser.get_resolved_res_configs(
res_id,
ARSCResTableConfig.default_config())[0][1]
except Exception as e:
log.warning("Exception selecting app name: %s" % e)
return app_name
def get_app_icon(self, max_dpi=65536):
"""
Return the first icon file name, which density is not greater than max_dpi,
unless exact icon resolution is set in the manifest, in which case
return the exact file.
This information is read from the AndroidManifest.xml
From https://developer.android.com/guide/practices/screens_support.html
and https://developer.android.com/ndk/reference/group___configuration.html
* DEFAULT 0dpi
* ldpi (low) 120dpi
* mdpi (medium) 160dpi
* TV 213dpi
* hdpi (high) 240dpi
* xhdpi (extra-high) 320dpi
* xxhdpi (extra-extra-high) 480dpi
* xxxhdpi (extra-extra-extra-high) 640dpi
* anydpi 65534dpi (0xFFFE)
* nodpi 65535dpi (0xFFFF)
There is a difference between nodpi and anydpi:
nodpi will be used if no other density is specified. Or the density does not match.
nodpi is the fallback for everything else. If there is a resource that matches the DPI,
this is used.
anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
Therefore anydpi is usually used with vector graphics and with constraints on the API level.
For example adaptive icons are usually marked as anydpi.
When it comes now to selecting an icon, there is the following flow:
1) is there an anydpi icon?
2) is there an icon for the dpi of the device?
3) is there a nodpi icon?
4) (only on very old devices) is there a icon with dpi 0 (the default)
For more information read here: https://stackoverflow.com/a/34370735/446140
:rtype: :class:`str`
"""
main_activity_name = self.get_main_activity()
app_icon = self.get_attribute_value(
'activity', 'icon', name=main_activity_name)
if not app_icon:
app_icon = self.get_attribute_value('application', 'icon')
res_parser = self.get_android_resources()
if not res_parser:
# Can not do anything below this point to resolve...
return None
if not app_icon:
res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
if res_id:
app_icon = "@%x" % res_id
if not app_icon:
res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
if res_id:
app_icon = "@%x" % res_id
if not app_icon:
# If the icon can not be found, return now
return None
if app_icon.startswith("@"):
res_id = int(app_icon[1:], 16)
candidates = res_parser.get_resolved_res_configs(res_id)
app_icon = None
current_dpi = -1
try:
for config, file_name in candidates:
dpi = config.get_density()
if current_dpi < dpi <= max_dpi:
app_icon = file_name
current_dpi = dpi
except Exception as e:
log.warning("Exception selecting app icon: %s" % e)
return app_icon
def get_package(self):
"""
Return the name of the package
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.package
def get_androidversion_code(self):
"""
Return the android version code
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.androidversion["Code"]
def get_androidversion_name(self):
"""
Return the android version name
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.androidversion["Name"]
def get_files(self):
"""
Return the file names inside the APK.
:rtype: a list of :class:`str`
"""
return self.zip.namelist()
def _get_file_magic_name(self, buffer):
"""
Return the filetype guessed for a buffer
:param buffer: bytes
:return: str of filetype
"""
default = "Unknown"
ftype = None
try:
# Magic is optional
import magic
except ImportError:
return default
try:
# There are several implementations of magic,
# unfortunately all called magic
# We use this one: https://github.com/ahupp/python-magic/
getattr(magic, "MagicException")
except AttributeError:
# Looks like no magic was installed
return default
try:
ftype = magic.from_buffer(buffer[:1024])
except magic.MagicError as e:
log.exception("Error getting the magic type!")
return default
if not ftype:
return default
else:
return self._patch_magic(buffer, ftype)
@property
def files(self):
"""
Returns a dictionary of filenames and detected magic type
:return: dictionary of files and their mime type
"""
return self.get_files_types()
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
:rtype: a dictionnary
"""
if self._files == {}:
# Generate File Types / CRC List
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
# FIXME why not use the crc from the zipfile?
# should be validated as well.
# crc = self.zip.getinfo(i).CRC
self._files[i] = self._get_file_magic_name(buffer)
return self._files
def _patch_magic(self, buffer, orig):
"""
Overwrite some probably wrong detections by mime libraries
:param buffer: bytes of the file to detect
:param orig: guess by mime libary
:return: corrected guess
"""
if ("Zip" in orig) or ('(JAR)' in orig):
val = is_android_raw(buffer)
if val == "APK":
return "Android application package file"
return orig
def get_files_crc32(self):
"""
Calculates and returns a dictionary of filenames and CRC32
:return: dict of filename: CRC32
"""
if self.files_crc32 == {}:
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
return self.files_crc32
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: str, str, int
"""
for k in self.get_files():
yield k, self.get_files_types()[k], self.get_files_crc32()[k]
def get_raw(self):
"""
Return raw bytes of the APK
:rtype: bytes
"""
return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
inside the APK
:rtype: bytes
"""
try:
return self.zip.read(filename)
except KeyError:
raise FileNotPresent(filename)
def get_dex(self):
"""
Return the raw data of the classes dex file
This will give you the data of the file called `classes.dex`
inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.
:rtype: bytes
"""
try:
return self.get_file("classes.dex")
except FileNotPresent:
return ""
def get_dex_names(self):
"""
Return the names of all DEX files found in the APK.
This method only accounts for "offical" dex files, i.e. all files
in the root directory of the APK named classes.dex or classes[0-9]+.dex
:rtype: a list of str
"""
dexre = re.compile("classes(\d*).dex")
return filter(lambda x: dexre.match(x), self.get_files())
def get_all_dex(self):
"""
Return the raw data of all classes dex files
:rtype: a generator of bytes
"""
for dex_name in self.get_dex_names():
yield self.get_file(dex_name)
@DeprecationWarning
def get_elements(self, tag_name, attribute, with_namespace=True):
"""
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value)
def _format_value(self, value):
"""
Format a value with packagename, if not already set
:param value:
:return:
"""
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
@DeprecationWarning
def get_element(self, tag_name, attribute, **attribute_filter):
"""
:Deprecated: use `get_attribute_value()` instead
Return element in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml:
if self.xml[i] is None:
continue
tag = self.xml[i].findall('.//' + tag_name)
if len(tag) == 0:
return None
for item in tag:
skip_this_item = False
for attr, val in list(attribute_filter.items()):
attr_val = item.get(self._ns(attr))
if attr_val != val:
skip_this_item = True
break
if skip_this_item:
continue
value = item.get(self._ns(attribute))
if value is not None:
return value
return None
def get_all_attribute_value(
self, tag_name, attribute, format_value=True, **attribute_filter
):
"""
Return all the attribute values in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:param format_value: specify if the value needs to be formatted with packagename
:type format_value: boolean
"""
tags = self.find_tags(tag_name, **attribute_filter)
for tag in tags:
value = tag.get(attribute) or tag.get(self._ns(attribute))
if value is not None:
if format_value:
yield self._format_value(value)
else:
yield value
def get_attribute_value(
self, tag_name, attribute, format_value=False, **attribute_filter
):
"""
Return the attribute value in xml files which matches the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:param format_value: specify if the value needs to be formatted with packagename
:type format_value: boolean
"""
for value in self.get_all_attribute_value(
tag_name, attribute, format_value, **attribute_filter):
if value is not None:
return value
def get_value_from_tag(self, tag, attribute):
"""
Return the value of the attribute in a specific tag
:param tag: specify the tag element
:type tag: Element
:param attribute: specify the attribute
:type attribute: string
"""
# TODO: figure out if both android:name and name tag exist which one to give preference
value = tag.get(self._ns(attribute))
if value is None:
log.warning("Failed to get the attribute with namespace")
value = tag.get(attribute)
return value
def find_tags(self, tag_name, **attribute_filter):
"""
Return a list of all the matched tags in all available xml
:param tag: specify the tag name
:type tag: string
"""
all_tags = [
self.find_tags_from_xml(
i, tag_name, **attribute_filter
)
for i in self.xml
]
return [tag for tag_list in all_tags for tag in tag_list]
def find_tags_from_xml(
self, xml_name, tag_name, **attribute_filter
):
"""
Return a list of all the matched tags in a specific xml
:param xml_name: specify from which xml to pick the tag from
:type xml_name: string
:param tag_name: specify the tag name
:type tag_name: string
"""
xml = self.xml[xml_name]
if xml is None:
return []
if xml.tag == tag_name:
if self.is_tag_matched(
xml.tag, **attribute_filter
):
return [xml]
return []
tags = xml.findall(".//" + tag_name)
return [
tag for tag in tags if self.is_tag_matched(
tag, **attribute_filter
)
]
def is_tag_matched(self, tag, **attribute_filter):
"""
Return true if the attributes matches in attribute filter
:param tag: specify the tag element
:type tag: Element
:param attribute: specify the attribute
:type attribute: string
"""
if len(attribute_filter) <= 0:
return True
for attr, value in attribute_filter.items():
# TODO: figure out if both android:name and name tag exist which one to give preference
_value = tag.get(self._ns(attr))
if _value is None:
log.warning("Failed to get the attribute with namespace")
_value = tag.get(attr)
if _value != value:
return False
return True
def get_main_activities(self):
"""
Return names of the main activities
These values are read from the AndroidManifest.xml
:rtype: a set of str
"""
x = set()
y = set()
for i in self.xml:
if self.xml[i] is None:
continue
activities_and_aliases = self.xml[i].findall(".//activity") + \
self.xml[i].findall(".//activity-alias")
for item in activities_and_aliases:
# Some applications have more than one MAIN activity.
# For example: paid and free content
activityEnabled = item.get(self._ns("enabled"))
if activityEnabled == "false":
continue
for sitem in item.findall(".//action"):
val = sitem.get(self._ns("name"))
if val == "android.intent.action.MAIN":
activity = item.get(self._ns("name"))
if activity is not None:
x.add(item.get(self._ns("name")))
else:
log.warning('Main activity without name')
for sitem in item.findall(".//category"):
val = sitem.get(self._ns("name"))
if val == "android.intent.category.LAUNCHER":
activity = item.get(self._ns("name"))
if activity is not None:
y.add(item.get(self._ns("name")))
else:
log.warning('Launcher activity without name')
return x.intersection(y)
def get_main_activity(self):
"""
Return the name of the main activity
This value is read from the AndroidManifest.xml
:rtype: str
"""
activities = self.get_main_activities()
if len(activities) > 0:
return self._format_value(activities.pop())
return None
def get_activities(self):
"""
Return the android:name attribute of all activities
:rtype: a list of str
"""
return list(self.get_all_attribute_value("activity", "name"))
def get_services(self):
"""
Return the android:name attribute of all services
:rtype: a list of str
"""
return list(self.get_all_attribute_value("service", "name"))
def get_receivers(self):
"""
Return the android:name attribute of all receivers
:rtype: a list of string
"""
return list(self.get_all_attribute_value("receiver", "name"))
def get_providers(self):
"""
Return the android:name attribute of all providers
:rtype: a list of string
"""
return list(self.get_all_attribute_value("provider", "name"))
def get_intent_filters(self, itemtype, name):
"""
Find intent filters for a given item and name.
Intent filter are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
"""
d = {"action": [], "category": []}
for i in self.xml:
# TODO: this can probably be solved using a single xpath
for item in self.xml[i].findall(".//" + itemtype):
if self._format_value(item.get(self._ns("name"))) == name:
for sitem in item.findall(".//intent-filter"):
for ssitem in sitem.findall("action"):
if ssitem.get(self._ns("name")) not in d["action"]:
d["action"].append(ssitem.get(self._ns("name")))
for ssitem in sitem.findall("category"):
if ssitem.get(self._ns("name")) not in d["category"]:
d["category"].append(ssitem.get(self._ns("name")))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d
def get_permissions(self):
"""
Return permissions names declared in the AndroidManifest.xml.
It is possible that permissions are returned multiple times,
as this function does not filter the permissions, i.e. it shows you
exactly what was defined in the AndroidManifest.xml.
Implied permissions, which are granted automatically, are not returned
here. Use :meth:`get_uses_implied_permission_list` if you need a list
of implied permissions.
:returns: A list of permissions
:rtype: list
"""
return self.permissions
def get_uses_implied_permission_list(self):
"""
Return all permissions implied by the target SDK or other permissions.
:rtype: list of string
"""
target_sdk_version = self.get_effective_target_sdk_version()
READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
READ_CONTACTS = 'android.permission.READ_CONTACTS'
READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'
implied = []
implied_WRITE_EXTERNAL_STORAGE = False
if target_sdk_version < 4:
if WRITE_EXTERNAL_STORAGE not in self.permissions:
implied.append([WRITE_EXTERNAL_STORAGE, None])
implied_WRITE_EXTERNAL_STORAGE = True
if READ_PHONE_STATE not in self.permissions:
implied.append([READ_PHONE_STATE, None])
if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
and READ_EXTERNAL_STORAGE not in self.permissions:
maxSdkVersion = None
for name, version in self.uses_permissions:
if name == WRITE_EXTERNAL_STORAGE:
maxSdkVersion = version
break
implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])
if target_sdk_version < 16:
if READ_CONTACTS in self.permissions \
and READ_CALL_LOG not in self.permissions:
implied.append([READ_CALL_LOG, None])
if WRITE_CONTACTS in self.permissions \
and WRITE_CALL_LOG not in self.permissions:
implied.append([WRITE_CALL_LOG, None])
return implied
def get_details_permissions(self):
"""
Return permissions with details
:rtype: dict of {permission: [protectionLevel, label, description]}
"""
l = {}
for i in self.permissions:
if i in self.permission_module:
x = self.permission_module[i]
l[i] = [x["protectionLevel"], x["label"], x["description"]]
else:
# FIXME: the permission might be signature, if it is defined by the app itself!
l[i] = ["normal", "Unknown permission from android reference",
"Unknown permission from android reference"]
return l
@DeprecationWarning
def get_requested_permissions(self):
"""
Returns all requested permissions.
It has the same result as :meth:`get_permissions` and might be removed in the future
:rtype: list of str
"""
return self.get_permissions()
def get_requested_aosp_permissions(self):
"""
Returns requested permissions declared within AOSP project.
This includes several other permissions as well, which are in the platform apps.
:rtype: list of str
"""
aosp_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm in list(self.permission_module.keys()):
aosp_permissions.append(perm)
return aosp_permissions
def get_requested_aosp_permissions_details(self):
"""
Returns requested aosp permissions with details.
:rtype: dictionary
"""
l = {}
for i in self.permissions:
try:
l[i] = self.permission_module[i]
except KeyError:
# if we have not found permission do nothing
continue
return l
def get_requested_third_party_permissions(self):
"""
Returns list of requested permissions not declared within AOSP project.
:rtype: list of strings
"""
third_party_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm not in list(self.permission_module.keys()):
third_party_permissions.append(perm)
return third_party_permissions
def get_declared_permissions(self):
"""
Returns list of the declared permissions.
:rtype: list of strings
"""
return list(self.declared_permissions.keys())
def get_declared_permissions_details(self):
"""
Returns declared permissions with the details.
:rtype: dict
"""
return self.declared_permissions
def get_max_sdk_version(self):
    """
    Return the android:maxSdkVersion attribute.

    :rtype: string, or None if the attribute is not set
    """
    return self.get_attribute_value("uses-sdk", "maxSdkVersion")

def get_min_sdk_version(self):
    """
    Return the android:minSdkVersion attribute.

    :rtype: string, or None if the attribute is not set
    """
    return self.get_attribute_value("uses-sdk", "minSdkVersion")

def get_target_sdk_version(self):
    """
    Return the android:targetSdkVersion attribute.

    :rtype: string, or None if the attribute is not set
    """
    return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion; always an int > 0.

    If targetSdkVersion is not set, minSdkVersion is used instead; if
    neither parses as an integer, 1 is returned, per the defaults in:
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    version = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(version)
    except (ValueError, TypeError):
        # Not set at all, or not a parsable number -> API level 1.
        return 1
def get_libraries(self):
    """
    Return the android:name attributes of all <uses-library> tags.

    :rtype: list
    """
    return list(self.get_all_attribute_value("uses-library", "name"))

def get_features(self):
    """
    Return a list of all android:name values found for the tag
    uses-feature in the AndroidManifest.xml.

    :return: list
    """
    return list(self.get_all_attribute_value("uses-feature", "name"))
def is_wearable(self):
    """
    Checks if this application is build for wearables by
    checking if it uses the feature 'android.hardware.type.watch'
    See: https://developer.android.com/training/wearables/apps/creating.html for more information.

    Not every app is setting this feature (not even the example Google provides),
    so it might be wise to not 100% rely on this feature.

    :return: True if wearable, False otherwise
    """
    return 'android.hardware.type.watch' in self.get_features()

def is_leanback(self):
    """
    Checks if this application is build for TV (Leanback support)
    by checking if it uses the feature 'android.software.leanback'

    :return: True if leanback feature is used, false otherwise
    """
    return 'android.software.leanback' in self.get_features()

def is_androidtv(self):
    """
    Checks if this application does not require a touchscreen,
    as this is the rule to get into the TV section of the Play Store
    See: https://developer.android.com/training/tv/start/start.html for more information.

    :return: True if 'android.hardware.touchscreen' is not required, False otherwise
    """
    # get_attribute_value() returns the matched android:name, so the
    # comparison is True exactly when a <uses-feature
    # android:required="false"> entry for the touchscreen exists.
    return self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen"
    ) == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files=None):
    """
    Create a new zip file from the entries of the original APK,
    optionally dropping entries that match a regex and/or replacing
    the content of specific entries.

    :param filename: the output filename of the zip
    :param deleted_files: a regex pattern; matching entries are omitted
    :param new_files: a dictionary (key: entry name, value: new content)
    :type filename: string
    :type deleted_files: None or a string
    :type new_files: dict or None
    """
    # `None` and `False` both mean "no replacements". The previous
    # implementation used a mutable default argument ({}), which is
    # shared between calls -- normalized here instead.
    if new_files is None or new_files is False:
        new_files = {}
    # Context manager guarantees the archive is closed (and its central
    # directory written) even if reading an entry raises.
    with zipfile.ZipFile(filename, 'w') as zout:
        for item in self.zip.infolist():
            if deleted_files is not None and re.match(deleted_files, item.filename) is not None:
                # Entry matches the deletion pattern -> drop it.
                continue
            if item.filename in new_files:
                zout.writestr(item, new_files[item.filename])
            else:
                # Copy the original entry unchanged.
                zout.writestr(item, self.zip.read(item.filename))
def get_android_manifest_axml(self):
    """
    Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`, or None
        if the manifest was never parsed (e.g. skip_analysis or broken APK)
    """
    try:
        return self.axml["AndroidManifest.xml"]
    except KeyError:
        return None

def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the AndroidManifest.xml file

    :rtype: :class:`~lxml.etree.Element`, or None if unavailable
    """
    try:
        return self.xml["AndroidManifest.xml"]
    except KeyError:
        return None

def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        # Parse lazily on first access and cache the result.
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
    """
    Print a human-readable summary of the APK (files, permissions,
    components and their intent filters) to stdout.
    Intended for interactive/debugging use.
    """
    # Populates self._files and self.files_crc32 before printing.
    self.get_files_types()

    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # No magic type detected for this entry; print CRC only.
            print("\t", i, "%x" % self.files_crc32[i])

    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)

    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)

    print("MAIN ACTIVITY: ", self.get_main_activity())

    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")

    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")

    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")

    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """Convenience alias for :meth:`get_app_name`."""
    return self.get_app_name()

@property
def packagename(self):
    """Convenience alias for :meth:`get_package`."""
    return self.get_package()

@property
def version_name(self):
    """android:versionName from the manifest (see :meth:`get_androidversion_name`)."""
    return self.get_androidversion_name()

@property
def version_code(self):
    """android:versionCode from the manifest (see :meth:`get_androidversion_code`)."""
    return self.get_androidversion_code()

@property
def icon_info(self):
    """Resource path of the application icon (see :meth:`get_app_icon`)."""
    return self.get_app_icon()

@property
def icon_data(self):
    """Raw bytes of the application icon, or None if it cannot be read."""
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Some zips store entry names in cp437 (legacy zip encoding);
            # retry with the re-encoded name before giving up.
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            pass
    return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK.get_elements | python | def get_elements(self, tag_name, attribute, with_namespace=True):
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value) | Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L614-L631 | [
"def _ns(self, name):\n \"\"\"\n return the name including the Android namespace\n \"\"\"\n return NS_ANDROID + name\n",
"def _format_value(self, value):\n \"\"\"\n Format a value with packagename, if not already set\n\n :param value:\n :return:\n \"\"\"\n if len(value) > 0:\n ... | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
    """
    This class can access to all elements in an APK file

    example::

        APK("myfile.apk")
        APK(read("myfile.apk"), raw=True)

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param magic_file: specify the magic file (not used anymore - legacy only)
    :param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
    :param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
                    Throw an exception on failure (default False)

    :type filename: string
    :type raw: boolean
    :type magic_file: string
    :type skip_analysis: boolean
    :type testzip: boolean
    """
    if magic_file:
        log.warning("You set magic_file but this parameter is actually unused. You should remove it.")

    self.filename = filename

    # Caches for parsed XML, binary XML printers and resource tables,
    # filled by _apk_analysis() or lazily on first access.
    self.xml = {}
    self.axml = {}
    self.arsc = {}

    # Manifest-derived fields, populated by _apk_analysis().
    self.package = ""
    self.androidversion = {}
    self.permissions = []
    self.uses_permissions = []
    self.declared_permissions = {}
    self.valid_apk = False

    # File-type and CRC32 caches, filled lazily by get_files_types().
    self._files = {}
    self.files_crc32 = {}

    if raw is True:
        self.__raw = bytearray(filename)
        self._sha256 = hashlib.sha256(self.__raw).hexdigest()
        # Set the filename to something sane
        self.filename = "raw_apk_sha256:{}".format(self._sha256)
    else:
        self.__raw = bytearray(read(filename))

    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")

    if testzip:
        # Test the zipfile for integrity before continuing.
        # This process might be slow, as the whole file is read.
        # Therefore it is possible to enable it as a separate feature.
        #
        # A short benchmark showed, that testing the zip takes about 10 times longer!
        # e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
        # testzip takes 0.1s!
        ret = self.zip.testzip()
        if ret is not None:
            # we could print the filename here, but there are zip which are so broken
            # That the filename is either very very long or does not make any sense.
            # Thus we do not do it, the user might find out by using other tools.
            raise BrokenAPKError("The APK is probably broken: testzip returned an error.")

    if not skip_analysis:
        self._apk_analysis()
def _ns(self, name):
    """
    Return the attribute name prefixed with the Android XML namespace.

    NS_ANDROID is a module-level constant -- presumably the Clark-notation
    android schema prefix; TODO confirm against the module's constants.
    """
    return NS_ANDROID + name
def _apk_analysis(self):
    """
    Run analysis on the APK file.

    This method is usually called by __init__ unless skip_analysis is True.
    It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
    extracted from the Manifest.
    """
    i = "AndroidManifest.xml"
    try:
        manifest_data = self.zip.read(i)
    except KeyError:
        log.warning("Missing AndroidManifest.xml. Is this an APK file?")
    else:
        ap = AXMLPrinter(manifest_data)

        if not ap.is_valid():
            log.error("Error while parsing AndroidManifest.xml - is the file valid?")
            return

        self.axml[i] = ap
        self.xml[i] = self.axml[i].get_xml_obj()

        if self.axml[i].is_packed():
            log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")

        if self.xml[i] is not None:
            if self.xml[i].tag != "manifest":
                log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
                return

            # Core manifest fields.
            self.package = self.get_attribute_value("manifest", "package")
            self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
            self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
            # De-duplicate requested permissions (order is not preserved).
            permission = list(self.get_all_attribute_value("uses-permission", "name"))
            self.permissions = list(set(self.permissions + permission))

            # Keep (name, maxSdkVersion) pairs for each <uses-permission>.
            for uses_permission in self.find_tags("uses-permission"):
                self.uses_permissions.append([
                    self.get_value_from_tag(uses_permission, "name"),
                    self._get_permission_maxsdk(uses_permission)
                ])

            # getting details of the declared permissions
            for d_perm_item in self.find_tags('permission'):
                d_perm_name = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "name")))
                d_perm_label = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "label")))
                d_perm_description = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "description")))
                d_perm_permissionGroup = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
                d_perm_protectionLevel = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "protectionLevel")))

                d_perm_details = {
                    "label": d_perm_label,
                    "description": d_perm_description,
                    "permissionGroup": d_perm_permissionGroup,
                    "protectionLevel": d_perm_protectionLevel,
                }
                self.declared_permissions[d_perm_name] = d_perm_details

            # Only mark the APK valid once the manifest parsed cleanly.
            self.valid_apk = True
def __getstate__(self):
"""
Function for pickling APK Objects.
We remove the zip from the Object, as it is not pickable
And it does not make any sense to pickle it anyways.
:return: the picklable APK Object without zip.
"""
# Upon pickling, we need to remove the ZipFile
x = self.__dict__
x['axml'] = str(x['axml'])
x['xml'] = str(x['xml'])
del x['zip']
return x
def __setstate__(self, state):
    """
    Load a pickled APK Object and restore the state

    We load the zip file back by reading __raw from the Object.

    :param state: pickled state
    """
    self.__dict__ = state

    # Recreate the (unpicklable) ZipFile from the preserved raw bytes.
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
if not string.startswith('@string/'):
return string
string_key = string[9:]
res_parser = self.get_android_resources()
if not res_parser:
return ''
string_value = ''
for package_name in res_parser.get_packages_names():
extracted_values = res_parser.get_string(package_name, string_key)
if extracted_values:
string_value = extracted_values[1]
break
return string_value
def _get_permission_maxsdk(self, item):
    """
    Read the android:maxSdkVersion attribute of a <uses-permission> tag.

    :param item: the <uses-permission> element
    :return: the value as int, or None if absent or unparsable
    """
    maxSdkVersion = None
    try:
        maxSdkVersion = int(self.get_value_from_tag(item, "maxSdkVersion"))
    except ValueError:
        # Attribute present but not a number.
        # NOTE(review): this concatenation raises TypeError if
        # get_max_sdk_version() returns None -- confirm intent.
        log.warning(self.get_max_sdk_version() + 'is not a valid value for <uses-permission> maxSdkVersion')
    except TypeError:
        # Attribute missing: int(None) raises TypeError, which is fine here.
        pass
    return maxSdkVersion
def is_valid_APK(self):
    """
    Return true if the APK is valid, false otherwise.

    An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
    This does not mean that the APK has a valid signature nor that the APK
    can be installed on an Android system.

    :rtype: boolean
    """
    return self.valid_apk

def get_filename(self):
    """
    Return the filename of the APK (or the synthetic
    "raw_apk_sha256:..." name when constructed from raw bytes).

    :rtype: :class:`str`
    """
    return self.filename
def get_app_name(self):
    """
    Return the appname of the APK

    This name is read from the AndroidManifest.xml
    using the application android:label.
    If no label exists, the android:label of the main activity is used.

    If there is also no main activity label, an empty string is returned.

    :rtype: :class:`str`
    """
    app_name = self.get_attribute_value('application', 'label')
    if app_name is None:
        # Fall back to the label of (one of) the main activities.
        activities = self.get_main_activities()
        main_activity_name = None
        if len(activities) > 0:
            main_activity_name = activities.pop()
        app_name = self.get_attribute_value(
            'activity', 'label', name=main_activity_name
        )

    if app_name is None:
        # No App name set
        # TODO return packagename instead?
        log.warning("It looks like that no app name is set for the main activity!")
        return ""

    if app_name.startswith("@"):
        # Label is a resource reference; try to resolve it.
        res_parser = self.get_android_resources()
        if not res_parser:
            # TODO: What should be the correct return value here?
            return app_name

        res_id, package = res_parser.parse_id(app_name)

        # If the package name is the same as the APK package,
        # we should be able to resolve the ID.
        if package and package != self.get_package():
            if package == 'android':
                # TODO: we can not resolve this, as we lack framework-res.apk
                # one exception would be when parsing framework-res.apk directly.
                log.warning("Resource ID with android package name encountered! "
                            "Will not resolve, framework-res.apk would be required.")
                return app_name
            else:
                # TODO should look this up, might be in the resources
                log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                return app_name

        try:
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            log.warning("Exception selecting app name: %s" % e)
    return app_name
def get_app_icon(self, max_dpi=65536):
    """
    Return the first icon file name, which density is not greater than max_dpi,
    unless exact icon resolution is set in the manifest, in which case
    return the exact file.

    This information is read from the AndroidManifest.xml

    From https://developer.android.com/guide/practices/screens_support.html
    and https://developer.android.com/ndk/reference/group___configuration.html

    * DEFAULT                             0dpi
    * ldpi (low)                        120dpi
    * mdpi (medium)                     160dpi
    * TV                                213dpi
    * hdpi (high)                       240dpi
    * xhdpi (extra-high)                320dpi
    * xxhdpi (extra-extra-high)         480dpi
    * xxxhdpi (extra-extra-extra-high)  640dpi
    * anydpi                          65534dpi (0xFFFE)
    * nodpi                           65535dpi (0xFFFF)

    There is a difference between nodpi and anydpi:
    nodpi will be used if no other density is specified. Or the density does not match.
    nodpi is the fallback for everything else. If there is a resource that matches the DPI,
    this is used.
    anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
    Therefore anydpi is usually used with vector graphics and with constraints on the API level.
    For example adaptive icons are usually marked as anydpi.

    When it comes now to selecting an icon, there is the following flow:

    1) is there an anydpi icon?
    2) is there an icon for the dpi of the device?
    3) is there a nodpi icon?
    4) (only on very old devices) is there a icon with dpi 0 (the default)

    For more information read here: https://stackoverflow.com/a/34370735/446140

    :rtype: :class:`str`
    """
    main_activity_name = self.get_main_activity()

    # Prefer an activity-specific icon, then the application icon.
    app_icon = self.get_attribute_value(
        'activity', 'icon', name=main_activity_name)

    if not app_icon:
        app_icon = self.get_attribute_value('application', 'icon')

    res_parser = self.get_android_resources()
    if not res_parser:
        # Can not do anything below this point to resolve...
        return None

    # Fall back to the conventional launcher-icon resource names.
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        # If the icon can not be found, return now
        return None

    if app_icon.startswith("@"):
        # Resource reference: pick the candidate with the highest
        # density that does not exceed max_dpi.
        res_id = int(app_icon[1:], 16)
        candidates = res_parser.get_resolved_res_configs(res_id)

        app_icon = None
        current_dpi = -1

        try:
            for config, file_name in candidates:
                dpi = config.get_density()
                if current_dpi < dpi <= max_dpi:
                    app_icon = file_name
                    current_dpi = dpi
        except Exception as e:
            log.warning("Exception selecting app icon: %s" % e)
    return app_icon
def get_package(self):
    """
    Return the name of the package

    This information is read from the AndroidManifest.xml

    :rtype: :class:`str`
    """
    return self.package

def get_androidversion_code(self):
    """
    Return the android version code

    This information is read from the AndroidManifest.xml

    :rtype: :class:`str`
    """
    return self.androidversion["Code"]

def get_androidversion_name(self):
    """
    Return the android version name

    This information is read from the AndroidManifest.xml

    :rtype: :class:`str`
    """
    return self.androidversion["Name"]

def get_files(self):
    """
    Return the file names inside the APK.

    :rtype: a list of :class:`str`
    """
    return self.zip.namelist()
def _get_file_magic_name(self, buffer):
    """
    Return the filetype guessed for a buffer

    :param buffer: bytes
    :return: str of filetype ("Unknown" when magic is unavailable or fails)
    """
    default = "Unknown"
    ftype = None

    try:
        # Magic is optional
        import magic
    except ImportError:
        return default

    try:
        # There are several implementations of magic,
        # unfortunately all called magic
        # We use this one: https://github.com/ahupp/python-magic/
        getattr(magic, "MagicException")
    except AttributeError:
        # Looks like no magic was installed
        return default

    try:
        # Only the first 1KiB is needed for type detection.
        ftype = magic.from_buffer(buffer[:1024])
    except magic.MagicError as e:
        log.exception("Error getting the magic type!")
        return default

    if not ftype:
        return default
    else:
        return self._patch_magic(buffer, ftype)

@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    :return: dictionary of files and their mime type
    """
    return self.get_files_types()
def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    The result is cached in self._files after the first call.

    :rtype: a dictionary
    """
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)

    return self._files

def _patch_magic(self, buffer, orig):
    """
    Overwrite some probably wrong detections by mime libraries

    :param buffer: bytes of the file to detect
    :param orig: guess by mime libary
    :return: corrected guess
    """
    if ("Zip" in orig) or ('(JAR)' in orig):
        val = is_android_raw(buffer)
        if val == "APK":
            return "Android application package file"

    return orig

def get_files_crc32(self):
    """
    Calculates and returns a dictionary of filenames and CRC32

    The result is cached in self.files_crc32 after the first call.

    :return: dict of filename: CRC32
    """
    if self.files_crc32 == {}:
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)

    return self.files_crc32
def get_files_information(self):
    """
    Return the files inside the APK with their associated types and crc32

    Yields (filename, magic type, crc32) tuples.

    :rtype: str, str, int
    """
    for k in self.get_files():
        yield k, self.get_files_types()[k], self.get_files_crc32()[k]

def get_raw(self):
    """
    Return raw bytes of the APK

    :rtype: bytes
    """
    return self.__raw

def get_file(self, filename):
    """
    Return the raw data of the specified filename
    inside the APK

    :raises FileNotPresent: if the entry does not exist in the zip
    :rtype: bytes
    """
    try:
        return self.zip.read(filename)
    except KeyError:
        raise FileNotPresent(filename)

def get_dex(self):
    """
    Return the raw data of the classes dex file

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.

    :rtype: bytes
    """
    try:
        return self.get_file("classes.dex")
    except FileNotPresent:
        # Historical contract: return an empty string rather than raise
        # when no classes.dex exists.
        return ""
def get_dex_names(self):
    """
    Return the names of all DEX files found in the APK.

    This method only accounts for "official" dex files, i.e. all files
    in the root directory of the APK named classes.dex or
    classes[0-9]+.dex

    :rtype: a list of str
    """
    # Raw string, escaped dot, anchored end: the previous pattern
    # "classes(\d*).dex" let '.' match any character and also matched
    # names with trailing garbage (e.g. "classes1dex", "classes.dexbak").
    dexre = re.compile(r"classes(\d*)\.dex$")
    return [name for name in self.get_files() if dexre.match(name)]
def get_all_dex(self):
    """
    Return the raw data of all classes dex files

    :rtype: a generator of bytes
    """
    for dex_name in self.get_dex_names():
        yield self.get_file(dex_name)

def is_multidex(self):
    """
    Test if the APK has multiple DEX files

    :return: True if multiple dex found, otherwise False
    """
    # NOTE(review): the dot before "dex" is unescaped (matches any
    # character) and "\d" relies on Python passing unknown escapes
    # through non-raw strings -- consider r"^classes(\d+)?\.dex$".
    dexre = re.compile("^classes(\d+)?.dex$")
    return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1
@DeprecationWarning
def _format_value(self, value):
"""
Format a value with packagename, if not already set
:param value:
:return:
"""
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    :Deprecated: use `get_attribute_value()` instead

    Return element in xml files which match with the tag name and the
    specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :rtype: string
    """
    # NOTE: the previous `@DeprecationWarning` decorator replaced the
    # function with a DeprecationWarning *instance*, making the method
    # uncallable; emit a proper runtime warning instead.
    import warnings
    warnings.warn(
        "get_element() is deprecated, use get_attribute_value() instead",
        DeprecationWarning,
        stacklevel=2,
    )
    for i in self.xml:
        if self.xml[i] is None:
            continue
        tag = self.xml[i].findall('.//' + tag_name)
        # NOTE(review): returning None when the *first* xml file has no
        # such tag skips the remaining files -- preserved for
        # backward compatibility.
        if len(tag) == 0:
            return None
        for item in tag:
            skip_this_item = False
            for attr, val in list(attribute_filter.items()):
                attr_val = item.get(self._ns(attr))
                if attr_val != val:
                    skip_this_item = True
                    break

            if skip_this_item:
                continue

            value = item.get(self._ns(attribute))

            if value is not None:
                return value
    return None
def get_all_attribute_value(
    self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Return all the attribute values in xml files which match with the tag name and the specific attribute

    This is a generator.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    tags = self.find_tags(tag_name, **attribute_filter)
    for tag in tags:
        # Try the plain attribute first, then the android-namespaced one.
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is not None:
            if format_value:
                yield self._format_value(value)
            else:
                yield value
def get_attribute_value(
    self, tag_name, attribute, format_value=False, **attribute_filter
):
    """
    Return the first matching attribute value in the xml files for the
    given tag name and attribute, or None when nothing matches.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    values = self.get_all_attribute_value(
        tag_name, attribute, format_value, **attribute_filter)
    # First non-None hit wins; default to None when exhausted.
    return next((v for v in values if v is not None), None)
def get_value_from_tag(self, tag, attribute):
    """
    Return the value of the attribute in a specific tag

    Looks up the android-namespaced attribute first and falls back to
    the plain attribute name.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    value = tag.get(self._ns(attribute))
    if value is None:
        log.warning("Failed to get the attribute with namespace")
        value = tag.get(attribute)
    return value
def find_tags(self, tag_name, **attribute_filter):
    """
    Return a list of all the matched tags across all available xml files.

    :param tag_name: specify the tag name
    :type tag_name: string
    """
    # Flatten the per-file match lists into a single list.
    return [
        tag
        for xml_name in self.xml
        for tag in self.find_tags_from_xml(xml_name, tag_name, **attribute_filter)
    ]
def find_tags_from_xml(
    self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    if xml.tag == tag_name:
        # The root element itself may be the requested tag.
        # NOTE(review): xml.tag (a string) is passed where is_tag_matched
        # expects an element; with a non-empty attribute_filter this would
        # raise -- looks like it should be `self.is_tag_matched(xml, ...)`.
        if self.is_tag_matched(
            xml.tag, **attribute_filter
        ):
            return [xml]
        return []
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags if self.is_tag_matched(
            tag, **attribute_filter
        )
    ]
def is_tag_matched(self, tag, **attribute_filter):
    """
    Return True if the tag's attributes satisfy every entry of the
    attribute filter (an empty filter always matches).

    :param tag: specify the tag element
    :type tag: Element
    """
    for attr, expected in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        actual = tag.get(self._ns(attr))
        if actual is None:
            log.warning("Failed to get the attribute with namespace")
            actual = tag.get(attr)
        if actual != expected:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml

    :rtype: a set of str
    """
    # x: activities with action MAIN; y: activities with category LAUNCHER.
    x = set()
    y = set()

    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
                                 self.xml[i].findall(".//activity-alias")

        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue

            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')

            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')

    # A "main" activity must declare both MAIN and LAUNCHER.
    return x.intersection(y)
def get_main_activity(self):
    """
    Return the fully-qualified name of one main activity, or None if
    the manifest declares none.

    :rtype: str
    """
    activities = self.get_main_activities()
    if not activities:
        return None
    # Pick an arbitrary one when several main activities exist.
    return self._format_value(activities.pop())
def get_activities(self):
    """
    Return the android:name attribute of all activities

    :rtype: a list of str
    """
    return list(self.get_all_attribute_value("activity", "name"))

def get_services(self):
    """
    Return the android:name attribute of all services

    :rtype: a list of str
    """
    return list(self.get_all_attribute_value("service", "name"))

def get_receivers(self):
    """
    Return the android:name attribute of all receivers

    :rtype: a list of string
    """
    return list(self.get_all_attribute_value("receiver", "name"))

def get_providers(self):
    """
    Return the android:name attribute of all providers

    :rtype: a list of string
    """
    return list(self.get_all_attribute_value("provider", "name"))
def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    d = {"action": [], "category": []}

    for i in self.xml:
        # TODO: this can probably be solved using a single xpath
        # NOTE(review): unlike other methods, self.xml[i] is not guarded
        # against None here -- confirm whether that can occur.
        for item in self.xml[i].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))

    # Drop empty sections so callers can test the dict for truthiness.
    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]

    return d
def get_permissions(self):
    """
    Return permissions names declared in the AndroidManifest.xml.

    It is possible that permissions are returned multiple times,
    as this function does not filter the permissions, i.e. it shows you
    exactly what was defined in the AndroidManifest.xml.

    Implied permissions, which are granted automatically, are not returned
    here. Use :meth:`get_uses_implied_permission_list` if you need a list
    of implied permissions.

    :returns: A list of permissions
    :rtype: list
    """
    return self.permissions
def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or other permissions.

    Each entry is a [permission_name, maxSdkVersion] pair, where
    maxSdkVersion may be None.

    :rtype: list of string
    """
    target_sdk_version = self.get_effective_target_sdk_version()

    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'

    implied = []

    # Apps targeting very old SDKs (< 4) implicitly receive storage and
    # phone-state permissions.
    implied_WRITE_EXTERNAL_STORAGE = False
    if target_sdk_version < 4:
        if WRITE_EXTERNAL_STORAGE not in self.permissions:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            implied_WRITE_EXTERNAL_STORAGE = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])

    # WRITE access implies READ access; inherit maxSdkVersion from the
    # corresponding <uses-permission> entry if there was one.
    if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
            and READ_EXTERNAL_STORAGE not in self.permissions:
        maxSdkVersion = None
        for name, version in self.uses_permissions:
            if name == WRITE_EXTERNAL_STORAGE:
                maxSdkVersion = version
                break
        implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])

    # Pre-16 targets: contact permissions imply call-log permissions.
    if target_sdk_version < 16:
        if READ_CONTACTS in self.permissions \
                and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions \
                and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])

    return implied
def get_details_permissions(self):
    """
    Return the requested permissions together with their details.

    :rtype: dict of {permission: [protectionLevel, label, description]}
    """
    details = {}
    for perm in self.permissions:
        info = self.permission_module.get(perm)
        if info is not None:
            details[perm] = [info["protectionLevel"], info["label"],
                             info["description"]]
        else:
            # FIXME: the permission might be signature, if it is defined by the app itself!
            details[perm] = ["normal",
                             "Unknown permission from android reference",
                             "Unknown permission from android reference"]
    return details
def get_requested_permissions(self):
    """
    Returns all requested permissions.

    It has the same result as :meth:`get_permissions` and might be removed
    in the future.

    .. deprecated:: use :meth:`get_permissions` instead.

    :rtype: list of str
    """
    # The original used ``@DeprecationWarning`` as a decorator, which
    # replaces the function with an (uncallable) exception instance —
    # calling the method raised TypeError. Emit a proper runtime warning
    # instead and keep the method callable.
    import warnings
    warnings.warn(
        "get_requested_permissions() is deprecated, use get_permissions() instead",
        DeprecationWarning, stacklevel=2)
    return self.get_permissions()
def get_requested_aosp_permissions(self):
    """
    Returns requested permissions declared within AOSP project.

    This includes several other permissions as well, which are in the
    platform apps.

    :rtype: list of str
    """
    # Membership test directly on the dict: the original rebuilt
    # list(self.permission_module.keys()) for every single permission,
    # turning a linear scan into O(n*m).
    return [perm for perm in self.get_permissions()
            if perm in self.permission_module]
def get_requested_aosp_permissions_details(self):
    """
    Returns requested aosp permissions with details.

    Permissions not known to the AOSP permission table are silently
    skipped.

    :rtype: dictionary
    """
    return {perm: self.permission_module[perm]
            for perm in self.permissions
            if perm in self.permission_module}
def get_requested_third_party_permissions(self):
    """
    Returns list of requested permissions not declared within AOSP project.

    :rtype: list of strings
    """
    # Direct dict membership instead of list(self.permission_module.keys())
    # per iteration (the original was O(n*m)).
    return [perm for perm in self.get_permissions()
            if perm not in self.permission_module]
def get_declared_permissions(self):
    """
    Returns list of the declared permissions.

    :rtype: list of strings
    """
    return [name for name in self.declared_permissions]
def get_declared_permissions_details(self):
    """
    Returns declared permissions with the details.

    :rtype: dict
    """
    details = self.declared_permissions
    return details
def get_max_sdk_version(self):
    """
    Return the android:maxSdkVersion attribute.

    :rtype: string
    """
    version = self.get_attribute_value("uses-sdk", "maxSdkVersion")
    return version
def get_min_sdk_version(self):
    """
    Return the android:minSdkVersion attribute.

    :rtype: string
    """
    version = self.get_attribute_value("uses-sdk", "minSdkVersion")
    return version
def get_target_sdk_version(self):
    """
    Return the android:targetSdkVersion attribute.

    :rtype: string
    """
    version = self.get_attribute_value("uses-sdk", "targetSdkVersion")
    return version
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion, always returns int > 0.

    If the targetSdkVersion is not set, it defaults to 1. This is
    set based on defaults as defined in:
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    declared = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(declared)
    except (ValueError, TypeError):
        return 1
def get_libraries(self):
    """
    Return the android:name attributes for uses-library tags.

    :rtype: list
    """
    return [lib for lib in self.get_all_attribute_value("uses-library", "name")]
def get_features(self):
    """
    Return a list of all android:names found for the tag uses-feature
    in the AndroidManifest.xml.

    :return: list
    """
    return [feat for feat in self.get_all_attribute_value("uses-feature", "name")]
def is_wearable(self):
    """
    Check if this application is built for wearables by looking for the
    feature 'android.hardware.type.watch'.

    See: https://developer.android.com/training/wearables/apps/creating.html

    Not every app sets this feature (not even the example Google provides),
    so it might be wise to not 100% rely on it.

    :return: True if wearable, False otherwise
    """
    features = self.get_features()
    return 'android.hardware.type.watch' in features
def is_leanback(self):
    """
    Check if this application is built for TV (Leanback support) by
    looking for the feature 'android.software.leanback'.

    :return: True if leanback feature is used, False otherwise
    """
    features = self.get_features()
    return 'android.software.leanback' in features
def is_androidtv(self):
    """
    Check if this application declares that it does not require a
    touchscreen, which is the rule to get into the TV section of the
    Play Store.

    See: https://developer.android.com/training/tv/start/start.html

    :return: True if 'android.hardware.touchscreen' is not required,
        False otherwise
    """
    value = self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen")
    return value == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files=None):
    """
    Create a new zip file from the APK's contents.

    :param filename: the output filename (or file-like object) of the zip
    :param deleted_files: a regex pattern; files whose name matches it are
        dropped from the output
    :param new_files: a dictionary of replacement contents
        (key: filename, value: new content of the file); ``False`` disables
        replacement entirely
    :type filename: string
    :type deleted_files: None or a string
    :type new_files: None, False or a dictionary
    """
    # The original used a mutable default argument (new_files={}), which
    # is shared across calls; use None as the sentinel instead.
    if new_files is None:
        new_files = {}
    zout = zipfile.ZipFile(filename, 'w')
    try:
        for item in self.zip.infolist():
            if deleted_files is not None:
                # Files matching the deleted_files regex are skipped.
                if re.match(deleted_files, item.filename) is None:
                    if new_files is not False and item.filename in new_files:
                        # Replace content for files listed in new_files.
                        zout.writestr(item, new_files[item.filename])
                        continue
                    # Otherwise, copy the original file through.
                    zout.writestr(item, self.zip.read(item.filename))
            elif new_files is not False and item.filename in new_files:
                zout.writestr(item, new_files[item.filename])
            else:
                zout.writestr(item, self.zip.read(item.filename))
    finally:
        # Ensure the archive is finalized even if reading a member fails.
        zout.close()
def get_android_manifest_axml(self):
    """
    Return the :class:`AXMLPrinter` object which corresponds to the
    AndroidManifest.xml file, or None if it is not available.

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
    """
    return self.axml.get("AndroidManifest.xml")
def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the
    AndroidManifest.xml file, or None if it is not available.

    :rtype: :class:`~lxml.etree.Element`
    """
    return self.xml.get("AndroidManifest.xml")
def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        # Parse lazily on first access and cache the parser instance.
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
    """Print a human-readable summary of the APK to stdout."""
    # Populate self._files / self.files_crc32 before printing.
    self.get_files_types()

    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # No magic type detected for this entry; print CRC only.
            print("\t", i, "%x" % self.files_crc32[i])

    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)

    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)

    print("MAIN ACTIVITY: ", self.get_main_activity())

    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")

    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")

    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")

    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    # Convenience alias for :meth:`get_app_name`.
    return self.get_app_name()
@property
def packagename(self):
    # Convenience alias for :meth:`get_package`.
    return self.get_package()
@property
def version_name(self):
    # Convenience alias for :meth:`get_androidversion_name`.
    return self.get_androidversion_name()
@property
def version_code(self):
    # Convenience alias for :meth:`get_androidversion_code`.
    return self.get_androidversion_code()
@property
def icon_info(self):
    # Filename of the app icon inside the APK (see :meth:`get_app_icon`).
    return self.get_app_icon()
@property
def icon_data(self):
    # Raw bytes of the app icon file, or None if it cannot be read.
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Retry with the name re-encoded via cp437, the legacy zip
            # codepage, in case the archive stored a non-UTF-8 name.
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            # Icon genuinely missing; fall through and return None.
            pass
    return app_icon_data
|
def _format_value(self, value):
    """
    Format a value with packagename, if not already set.

    :param value: the attribute value from the manifest
    :return: the fully-qualified value
    """
    if value and value[0] == ".":
        # ".MyActivity" -> "<package>.MyActivity"
        return self.package + value
    if value and "." not in value:
        # "MyActivity" -> "<package>.MyActivity"
        return self.package + "." + value
    # Already fully qualified, or empty. (The original additionally
    # tested ``value.find(".") == 0`` here, which is unreachable after
    # the leading-dot check above; that dead branch is removed.)
    return value
:param value:
:return: | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L633-L649 | null | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
    """
    This class can access to all elements in an APK file

    example::

        APK("myfile.apk")
        APK(read("myfile.apk"), raw=True)

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param magic_file: specify the magic file (not used anymore - legacy only)
    :param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
    :param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
        Throw an exception on failure (default False)

    :type filename: string
    :type raw: boolean
    :type magic_file: string
    :type skip_analysis: boolean
    :type testzip: boolean
    """
    if magic_file:
        log.warning("You set magic_file but this parameter is actually unused. You should remove it.")

    self.filename = filename

    # Parsed-manifest / resource caches, filled by _apk_analysis().
    self.xml = {}
    self.axml = {}
    self.arsc = {}

    self.package = ""
    self.androidversion = {}
    self.permissions = []
    self.uses_permissions = []
    self.declared_permissions = {}
    self.valid_apk = False

    self._files = {}
    self.files_crc32 = {}

    if raw is True:
        self.__raw = bytearray(filename)
        self._sha256 = hashlib.sha256(self.__raw).hexdigest()
        # Set the filename to something sane
        self.filename = "raw_apk_sha256:{}".format(self._sha256)
    else:
        self.__raw = bytearray(read(filename))

    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")

    if testzip:
        # Test the zipfile for integrity before continuing.
        # This process might be slow, as the whole file is read.
        # Therefore it is possible to enable it as a separate feature.
        #
        # A short benchmark showed, that testing the zip takes about 10 times longer!
        # e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
        # testzip takes 0.1s!
        ret = self.zip.testzip()
        if ret is not None:
            # we could print the filename here, but there are zip which are so broken
            # That the filename is either very very long or does not make any sense.
            # Thus we do not do it, the user might find out by using other tools.
            raise BrokenAPKError("The APK is probably broken: testzip returned an error.")

    if not skip_analysis:
        self._apk_analysis()
def _ns(self, name):
    """
    Return the attribute/tag name qualified with the Android XML namespace.
    """
    return NS_ANDROID + name
def _apk_analysis(self):
    """
    Run analysis on the APK file.

    This method is usually called by __init__ except if skip_analysis is False.
    It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
    extracted from the Manifest.
    """
    i = "AndroidManifest.xml"
    try:
        manifest_data = self.zip.read(i)
    except KeyError:
        log.warning("Missing AndroidManifest.xml. Is this an APK file?")
    else:
        ap = AXMLPrinter(manifest_data)

        if not ap.is_valid():
            log.error("Error while parsing AndroidManifest.xml - is the file valid?")
            return

        self.axml[i] = ap
        self.xml[i] = self.axml[i].get_xml_obj()

        if self.axml[i].is_packed():
            log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")

        if self.xml[i] is not None:
            if self.xml[i].tag != "manifest":
                log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
                return

            # Basic identity fields from the <manifest> tag.
            self.package = self.get_attribute_value("manifest", "package")
            self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
            self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
            permission = list(self.get_all_attribute_value("uses-permission", "name"))
            self.permissions = list(set(self.permissions + permission))

            # Keep (name, maxSdkVersion) pairs for each <uses-permission>.
            for uses_permission in self.find_tags("uses-permission"):
                self.uses_permissions.append([
                    self.get_value_from_tag(uses_permission, "name"),
                    self._get_permission_maxsdk(uses_permission)
                ])

            # getting details of the declared permissions
            for d_perm_item in self.find_tags('permission'):
                d_perm_name = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "name")))
                d_perm_label = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "label")))
                d_perm_description = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "description")))
                d_perm_permissionGroup = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
                d_perm_protectionLevel = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "protectionLevel")))

                d_perm_details = {
                    "label": d_perm_label,
                    "description": d_perm_description,
                    "permissionGroup": d_perm_permissionGroup,
                    "protectionLevel": d_perm_protectionLevel,
                }
                self.declared_permissions[d_perm_name] = d_perm_details

            self.valid_apk = True
def __getstate__(self):
    """
    Function for pickling APK Objects.

    We remove the zip from the Object, as it is not pickable
    And it does not make any sense to pickle it anyways.

    :return: the picklable APK Object without zip.
    """
    # Operate on a *copy*: the original bound x = self.__dict__ directly,
    # so pickling stringified the live object's axml/xml and deleted its
    # zip attribute, corrupting the instance being pickled.
    x = self.__dict__.copy()
    x['axml'] = str(x['axml'])
    x['xml'] = str(x['xml'])
    del x['zip']
    return x
def __setstate__(self, state):
    """
    Load a pickled APK Object and restore the state

    We load the zip file back by reading __raw from the Object.

    :param state: pickled state
    """
    # NOTE(review): __getstate__ stringifies 'axml'/'xml' before pickling,
    # so after restore those hold str representations, not parser objects
    # -- confirm downstream users re-parse if needed.
    self.__dict__ = state

    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
    """
    Resolve an ``@string/<key>`` reference via the resource table.

    Returns the input unchanged when it is not a string reference, and
    '' when the resources cannot be parsed or the key is not found.

    :param string: the raw attribute value
    :rtype: str
    """
    if not string.startswith('@string/'):
        return string
    # len('@string/') == 8 -- the original sliced at [9:], silently
    # dropping the first character of every resource key.
    string_key = string[len('@string/'):]

    res_parser = self.get_android_resources()
    if not res_parser:
        return ''
    string_value = ''
    for package_name in res_parser.get_packages_names():
        extracted_values = res_parser.get_string(package_name, string_key)
        if extracted_values:
            string_value = extracted_values[1]
            break
    return string_value
def _get_permission_maxsdk(self, item):
    """
    Parse the android:maxSdkVersion attribute of a <uses-permission> tag.

    :param item: the <uses-permission> tag element
    :return: the int value, or None when the attribute is absent or invalid
    """
    sdk_value = self.get_value_from_tag(item, "maxSdkVersion")
    if sdk_value is None:
        # Attribute not present (the original relied on int(None)
        # raising TypeError to reach the same result).
        return None
    try:
        return int(sdk_value)
    except ValueError:
        # Log the offending value itself. The original logged
        # self.get_max_sdk_version() -- an unrelated attribute that may
        # be None, making the string concatenation raise TypeError.
        log.warning("'%s' is not a valid value for <uses-permission> maxSdkVersion", sdk_value)
        return None
def is_valid_APK(self):
    """
    Return true if the APK is valid, false otherwise.

    An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
    This does not mean that the APK has a valid signature nor that the APK
    can be installed on an Android system.

    :rtype: boolean
    """
    valid = self.valid_apk
    return valid
def get_filename(self):
    """
    Return the filename of the APK.

    :rtype: :class:`str`
    """
    name = self.filename
    return name
def get_app_name(self):
    """
    Return the appname of the APK

    This name is read from the AndroidManifest.xml
    using the application android:label.
    If no label exists, the android:label of the main activity is used.

    If there is also no main activity label, an empty string is returned.

    :rtype: :class:`str`
    """
    app_name = self.get_attribute_value('application', 'label')
    if app_name is None:
        # Fall back to the label of (one of) the main activities.
        activities = self.get_main_activities()
        main_activity_name = None
        if len(activities) > 0:
            main_activity_name = activities.pop()
        app_name = self.get_attribute_value(
            'activity', 'label', name=main_activity_name
        )

    if app_name is None:
        # No App name set
        # TODO return packagename instead?
        log.warning("It looks like that no app name is set for the main activity!")
        return ""

    if app_name.startswith("@"):
        # The label is a resource reference; resolve it via resources.arsc.
        res_parser = self.get_android_resources()
        if not res_parser:
            # TODO: What should be the correct return value here?
            return app_name

        res_id, package = res_parser.parse_id(app_name)

        # If the package name is the same as the APK package,
        # we should be able to resolve the ID.
        if package and package != self.get_package():
            if package == 'android':
                # TODO: we can not resolve this, as we lack framework-res.apk
                # one exception would be when parsing framework-res.apk directly.
                log.warning("Resource ID with android package name encountered! "
                            "Will not resolve, framework-res.apk would be required.")
                return app_name
            else:
                # TODO should look this up, might be in the resources
                log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                return app_name

        try:
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            log.warning("Exception selecting app name: %s" % e)
    return app_name
def get_app_icon(self, max_dpi=65536):
    """
    Return the first icon file name, which density is not greater than max_dpi,
    unless exact icon resolution is set in the manifest, in which case
    return the exact file.

    This information is read from the AndroidManifest.xml

    From https://developer.android.com/guide/practices/screens_support.html
    and https://developer.android.com/ndk/reference/group___configuration.html

    * DEFAULT                             0dpi
    * ldpi (low)                        120dpi
    * mdpi (medium)                     160dpi
    * TV                                213dpi
    * hdpi (high)                       240dpi
    * xhdpi (extra-high)                320dpi
    * xxhdpi (extra-extra-high)         480dpi
    * xxxhdpi (extra-extra-extra-high)  640dpi
    * anydpi                          65534dpi (0xFFFE)
    * nodpi                           65535dpi (0xFFFF)

    There is a difference between nodpi and anydpi:
    nodpi will be used if no other density is specified. Or the density does not match.
    nodpi is the fallback for everything else. If there is a resource that matches the DPI,
    this is used.
    anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
    Therefore anydpi is usually used with vector graphics and with constraints on the API level.
    For example adaptive icons are usually marked as anydpi.

    When it comes now to selecting an icon, there is the following flow:

    1) is there an anydpi icon?
    2) is there an icon for the dpi of the device?
    3) is there a nodpi icon?
    4) (only on very old devices) is there a icon with dpi 0 (the default)

    For more information read here: https://stackoverflow.com/a/34370735/446140

    :rtype: :class:`str`
    """
    main_activity_name = self.get_main_activity()

    # Prefer the icon attached to the main activity; fall back to the
    # application-wide icon.
    app_icon = self.get_attribute_value(
        'activity', 'icon', name=main_activity_name)
    if not app_icon:
        app_icon = self.get_attribute_value('application', 'icon')

    res_parser = self.get_android_resources()
    if not res_parser:
        # Can not do anything below this point to resolve...
        return None

    # No icon in the manifest: look up the conventional launcher
    # resources (mipmap first, then drawable).
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id

    if not app_icon:
        # If the icon can not be found, return now
        return None

    if app_icon.startswith("@"):
        res_id = int(app_icon[1:], 16)
        candidates = res_parser.get_resolved_res_configs(res_id)

        app_icon = None
        current_dpi = -1

        # Pick the candidate with the highest density not exceeding
        # max_dpi.
        try:
            for config, file_name in candidates:
                dpi = config.get_density()
                if current_dpi < dpi <= max_dpi:
                    app_icon = file_name
                    current_dpi = dpi
        except Exception as e:
            log.warning("Exception selecting app icon: %s" % e)

    return app_icon
def get_package(self):
    """
    Return the name of the package.

    This information is read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    package_name = self.package
    return package_name
def get_androidversion_code(self):
    """
    Return the android version code.

    This information is read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    code = self.androidversion["Code"]
    return code
def get_androidversion_name(self):
    """
    Return the android version name.

    This information is read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    name = self.androidversion["Name"]
    return name
def get_files(self):
    """
    Return the file names inside the APK.

    :rtype: a list of :class:`str`
    """
    names = self.zip.namelist()
    return names
def _get_file_magic_name(self, buffer):
    """
    Return the filetype guessed for a buffer

    :param buffer: bytes
    :return: str of filetype
    """
    default = "Unknown"
    ftype = None

    try:
        # Magic is optional
        import magic
    except ImportError:
        return default

    try:
        # There are several implementations of magic,
        # unfortunately all called magic
        # We use this one: https://github.com/ahupp/python-magic/
        getattr(magic, "MagicException")
    except AttributeError:
        # Looks like no magic was installed
        return default

    try:
        # Only the first 1024 bytes are needed for detection.
        ftype = magic.from_buffer(buffer[:1024])
    except magic.MagicError as e:
        log.exception("Error getting the magic type!")
        return default

    if not ftype:
        return default
    else:
        # Post-process known-wrong guesses (e.g. Zip vs APK).
        return self._patch_magic(buffer, ftype)
@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    :return: dictionary of files and their mime type
    """
    # Delegates to get_files_types(), which computes and caches the map.
    return self.get_files_types()
def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    :rtype: a dictionnary
    """
    # Lazily computed on first call; subsequent calls reuse the cache.
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)

    return self._files
def _patch_magic(self, buffer, orig):
    """
    Overwrite some probably wrong detections by mime libraries.

    :param buffer: bytes of the file to detect
    :param orig: guess by mime library
    :return: corrected guess
    """
    looks_like_zip = ("Zip" in orig) or ('(JAR)' in orig)
    if looks_like_zip and is_android_raw(buffer) == "APK":
        return "Android application package file"
    return orig
def get_files_crc32(self):
    """
    Calculate (lazily, on first call) and return a dictionary of
    filenames and CRC32.

    :return: dict of filename: CRC32
    """
    if not self.files_crc32:
        for name in self.get_files():
            self.files_crc32[name] = crc32(self.zip.read(name))
    return self.files_crc32
def get_files_information(self):
    """
    Yield (filename, filetype, crc32) for every file inside the APK.

    :rtype: str, str, int
    """
    for name in self.get_files():
        yield name, self.get_files_types()[name], self.get_files_crc32()[name]
def get_raw(self):
    """
    Return raw bytes of the APK.

    :rtype: bytes
    """
    raw = self.__raw
    return raw
def get_file(self, filename):
    """
    Return the raw data of the specified filename
    inside the APK

    :raises FileNotPresent: if the file does not exist in the archive
    :rtype: bytes
    """
    try:
        return self.zip.read(filename)
    except KeyError:
        # zipfile raises KeyError for unknown members; translate it to
        # the package-level exception.
        raise FileNotPresent(filename)
def get_dex(self):
    """
    Return the raw data of the classes dex file.

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to use
    :func:`~APK.get_all_dex`.

    :rtype: bytes
    """
    try:
        data = self.get_file("classes.dex")
    except FileNotPresent:
        data = ""
    return data
def get_dex_names(self):
    """
    Return the names of all DEX files found in the APK.

    This method only accounts for "offical" dex files, i.e. all files
    in the root directory of the APK named classes.dex or classes[0-9]+.dex

    :rtype: a list of str
    """
    # Escape the dot and anchor the end: the original pattern
    # "classes(\d*).dex" also matched names such as "classes1xdex" or
    # "classes.dexfoo". A real list is returned, matching the docstring
    # (the original returned a lazy filter object).
    dexre = re.compile(r"classes(\d*)\.dex$")
    return [name for name in self.get_files() if dexre.match(name)]
def get_all_dex(self):
    """
    Yield the raw data of every classes dex file.

    :rtype: a generator of bytes
    """
    for name in self.get_dex_names():
        yield self.get_file(name)
def is_multidex(self):
    """
    Test if the APK has multiple DEX files.

    :return: True if multiple dex found, otherwise False
    """
    # \. instead of an unescaped dot: the original pattern also matched
    # names such as "classes1xdex".
    dexre = re.compile(r"^classes(\d+)?\.dex$")
    return len([name for name in self.get_files() if dexre.search(name)]) > 1
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    Deprecated: use `get_all_attribute_value()` instead

    Return elements in xml files which match with the tag name and the specific attribute

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    # The original used ``@DeprecationWarning`` as a decorator, which
    # replaces the method with an uncallable exception instance; emit a
    # proper runtime warning instead.
    import warnings
    warnings.warn(
        "get_elements() is deprecated, use get_all_attribute_value() instead",
        DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        for item in self.xml[i].findall('.//' + tag_name):
            if with_namespace:
                value = item.get(self._ns(attribute))
            else:
                value = item.get(attribute)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    Deprecated: use `get_attribute_value()` instead

    Return element in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :rtype: string
    """
    # ``@DeprecationWarning`` as a decorator made this attribute an
    # exception instance instead of a callable; warn at runtime instead.
    import warnings
    warnings.warn(
        "get_element() is deprecated, use get_attribute_value() instead",
        DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        tag = self.xml[i].findall('.//' + tag_name)
        if len(tag) == 0:
            return None
        for item in tag:
            skip_this_item = False
            for attr, val in list(attribute_filter.items()):
                attr_val = item.get(self._ns(attr))
                if attr_val != val:
                    skip_this_item = True
                    break

            if skip_this_item:
                continue

            value = item.get(self._ns(attribute))

            if value is not None:
                return value
    return None
def get_all_attribute_value(
        self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Yield all the attribute values in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    tags = self.find_tags(tag_name, **attribute_filter)
    for tag in tags:
        # Prefer the plain attribute; fall back to the android-namespaced
        # one.
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is not None:
            if format_value:
                yield self._format_value(value)
            else:
                yield value
def get_attribute_value(
        self, tag_name, attribute, format_value=False, **attribute_filter
):
    """
    Return the first non-None attribute value in xml files which matches
    the tag name and the specific attribute, or None.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    values = self.get_all_attribute_value(
        tag_name, attribute, format_value, **attribute_filter)
    for value in values:
        if value is not None:
            return value
    return None
def get_value_from_tag(self, tag, attribute):
    """
    Return the value of the attribute in a specific tag, preferring the
    android-namespaced attribute and falling back to the plain name.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    namespaced_value = tag.get(self._ns(attribute))
    if namespaced_value is not None:
        return namespaced_value
    log.warning("Failed to get the attribute with namespace")
    return tag.get(attribute)
def find_tags(self, tag_name, **attribute_filter):
    """
    Return a list of all the matched tags in all available xml files.

    :param tag_name: specify the tag name
    :type tag_name: string
    """
    matches = []
    for xml_name in self.xml:
        matches.extend(
            self.find_tags_from_xml(xml_name, tag_name, **attribute_filter))
    return matches
def find_tags_from_xml(
        self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    # The root element itself may be the tag we are looking for;
    # findall() below only searches descendants.
    if xml.tag == tag_name:
        if self.is_tag_matched(
                xml.tag, **attribute_filter
        ):
            return [xml]
        return []
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags if self.is_tag_matched(
            tag, **attribute_filter
        )
    ]
def is_tag_matched(self, tag, **attribute_filter):
    """
    Return True if all entries of attribute_filter match the attributes
    of the tag (an empty filter matches everything).

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    for attr, expected in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        actual = tag.get(self._ns(attr))
        if actual is None:
            log.warning("Failed to get the attribute with namespace")
            actual = tag.get(attr)
        if actual != expected:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml

    :rtype: a set of str
    """
    # x: activities with action MAIN; y: activities with category
    # LAUNCHER. Main activities are the intersection of both.
    x = set()
    y = set()

    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
                                 self.xml[i].findall(".//activity-alias")

        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue

            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')

            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')

    return x.intersection(y)
def get_main_activity(self):
    """
    Return the name of the main activity, or None if there is none.

    This value is read from the AndroidManifest.xml.

    :rtype: str
    """
    activities = self.get_main_activities()
    if activities:
        return self._format_value(activities.pop())
    return None
def get_activities(self):
    """
    Return the android:name attribute of all activities.

    :rtype: a list of str
    """
    return [name for name in self.get_all_attribute_value("activity", "name")]
def get_services(self):
    """
    Return the android:name attribute of all services.

    :rtype: a list of str
    """
    return [name for name in self.get_all_attribute_value("service", "name")]
def get_receivers(self):
    """
    Return the android:name attribute of all receivers.

    :rtype: a list of string
    """
    return [name for name in self.get_all_attribute_value("receiver", "name")]
def get_providers(self):
    """
    Return the android:name attribute of all providers.

    :rtype: a list of string
    """
    return [name for name in self.get_all_attribute_value("provider", "name")]
def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    d = {"action": [], "category": []}

    for i in self.xml:
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[i].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        # Deduplicate while preserving first-seen order.
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))

    # Drop empty keys so callers can test the dict for truthiness.
    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]

    return d
def get_permissions(self):
    """
    Return the permission names exactly as declared in the
    AndroidManifest.xml (<uses-permission> entries).

    Entries may appear multiple times; nothing is filtered. Implied
    permissions (granted automatically) are not included here — use
    :meth:`get_uses_implied_permission_list` for those.

    :returns: A list of permissions
    :rtype: list
    """
    return self.permissions
def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or other permissions.

    :rtype: list of string
    """
    target_sdk_version = self.get_effective_target_sdk_version()
    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'
    implied = []
    implied_WRITE_EXTERNAL_STORAGE = False
    # NOTE(review): the SDK thresholds below (4, 16) presumably mirror the
    # implied-permission rules of AOSP's PackageParser — confirm upstream.
    if target_sdk_version < 4:
        if WRITE_EXTERNAL_STORAGE not in self.permissions:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            implied_WRITE_EXTERNAL_STORAGE = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])
    # WRITE access (declared or implied above) also implies READ access.
    if (WRITE_EXTERNAL_STORAGE in self.permissions or implied_WRITE_EXTERNAL_STORAGE) \
            and READ_EXTERNAL_STORAGE not in self.permissions:
        maxSdkVersion = None
        for name, version in self.uses_permissions:
            if name == WRITE_EXTERNAL_STORAGE:
                maxSdkVersion = version
                break
        # The implied READ inherits WRITE's maxSdkVersion, if any.
        implied.append([READ_EXTERNAL_STORAGE, maxSdkVersion])
    if target_sdk_version < 16:
        if READ_CONTACTS in self.permissions \
                and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions \
                and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])
    return implied
def get_details_permissions(self):
    """
    Return the requested permissions together with details from the
    AOSP permission table.

    Permissions unknown to the table fall back to a "normal" protection
    level with a placeholder label/description.

    :rtype: dict of {permission: [protectionLevel, label, description]}
    """
    # Renamed the accumulator from the ambiguous ``l`` (PEP 8 E741).
    details = {}
    for perm in self.permissions:
        if perm in self.permission_module:
            meta = self.permission_module[perm]
            details[perm] = [meta["protectionLevel"], meta["label"],
                             meta["description"]]
        else:
            # FIXME: the permission might be signature, if it is defined by the app itself!
            details[perm] = ["normal",
                             "Unknown permission from android reference",
                             "Unknown permission from android reference"]
    return details
def get_requested_permissions(self):
    """
    Return all requested permissions.

    Deprecated: identical to :meth:`get_permissions`; call that instead.
    This method may be removed in the future.

    :rtype: list of str
    """
    # The original used ``@DeprecationWarning`` as a decorator, which
    # replaces the method with an exception *instance* and makes it
    # uncallable. Emit a proper runtime warning instead.
    import warnings
    warnings.warn(
        "get_requested_permissions() is deprecated, use get_permissions() instead",
        DeprecationWarning, stacklevel=2)
    return self.get_permissions()
def get_requested_aosp_permissions(self):
    """
    Return requested permissions declared within the AOSP project.

    This includes several other permissions as well, which are in the
    platform apps.

    :rtype: list of str
    """
    # Direct dict membership instead of ``in list(d.keys())``, which
    # rebuilt the key list for every permission (O(n) per test).
    return [perm for perm in self.get_permissions()
            if perm in self.permission_module]

def get_requested_aosp_permissions_details(self):
    """
    Return requested AOSP permissions with details; unknown permissions
    are silently skipped.

    :rtype: dictionary
    """
    # Renamed the accumulator from the ambiguous ``l`` (PEP 8 E741) and
    # replaced the try/except KeyError loop with a membership test.
    return {perm: self.permission_module[perm]
            for perm in self.permissions
            if perm in self.permission_module}

def get_requested_third_party_permissions(self):
    """
    Return requested permissions NOT declared within the AOSP project.

    :rtype: list of strings
    """
    return [perm for perm in self.get_permissions()
            if perm not in self.permission_module]
def get_declared_permissions(self):
    """Return the names of permissions declared by this APK itself.

    :rtype: list of strings
    """
    return [name for name in self.declared_permissions]

def get_declared_permissions_details(self):
    """Return the declared permissions together with their details.

    :rtype: dict
    """
    return self.declared_permissions

def get_max_sdk_version(self):
    """Return the android:maxSdkVersion attribute of <uses-sdk>.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "maxSdkVersion")

def get_min_sdk_version(self):
    """Return the android:minSdkVersion attribute of <uses-sdk>.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "minSdkVersion")

def get_target_sdk_version(self):
    """Return the android:targetSdkVersion attribute of <uses-sdk>.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion as an int (always > 0).

    Falls back to minSdkVersion when targetSdkVersion is unset, and to 1
    when neither yields a usable number, matching the defaults described at
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    version = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(version)
    except (ValueError, TypeError):
        # Unset or non-numeric: the documented default is 1.
        return 1
def get_libraries(self):
    """Return the android:name attribute of every <uses-library> tag.

    :rtype: list
    """
    return [lib for lib in self.get_all_attribute_value("uses-library", "name")]

def get_features(self):
    """Return the android:name attribute of every <uses-feature> tag.

    :return: list
    """
    return [feat for feat in self.get_all_attribute_value("uses-feature", "name")]

def is_wearable(self):
    """
    Check whether the app targets wearables, i.e. declares the
    'android.hardware.type.watch' feature.

    See https://developer.android.com/training/wearables/apps/creating.html
    Not every wearable app sets this feature (not even Google's example),
    so do not rely on it 100%.

    :return: True if wearable, False otherwise
    """
    return 'android.hardware.type.watch' in self.get_features()

def is_leanback(self):
    """
    Check whether the app is built for TV (Leanback support), i.e.
    declares the 'android.software.leanback' feature.

    :return: True if leanback feature is used, false otherwise
    """
    return 'android.software.leanback' in self.get_features()

def is_androidtv(self):
    """
    Check whether the app does not require a touchscreen, which is the
    rule to get into the TV section of the Play Store.

    See https://developer.android.com/training/tv/start/start.html

    :return: True if 'android.hardware.touchscreen' is not required, False otherwise
    """
    # get_attribute_value only returns the feature name when a matching
    # <uses-feature android:required="false"> tag exists.
    not_required = self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen"
    )
    return not_required == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files=None):
    """
    Create a new zip based on the current archive, with files removed
    and/or replaced.

    :param filename: the output filename of the zip
    :param deleted_files: a regex pattern; matching entries are removed
    :param new_files: a dictionary of replacement files
    :type filename: string
    :type deleted_files: None or a string
    :type new_files: dict (key: filename, value: content of the file)
    """
    # The original signature used a mutable default (``new_files={}``) and
    # also accepted ``False`` as a sentinel; normalize both to an empty
    # dict, which preserves the old behavior in every case.
    if not new_files:
        new_files = {}
    # Context manager guarantees the output archive is closed (and the
    # central directory written) even if a read below raises.
    with zipfile.ZipFile(filename, 'w') as zout:
        for item in self.zip.infolist():
            if deleted_files is not None:
                # Skip entries matching the deletion pattern entirely.
                if re.match(deleted_files, item.filename) is None:
                    if item.filename in new_files:
                        # Replace the content but keep the zip metadata.
                        zout.writestr(item, new_files[item.filename])
                    else:
                        zout.writestr(item, self.zip.read(item.filename))
            elif item.filename in new_files:
                zout.writestr(item, new_files[item.filename])
            else:
                # No deletions, no replacement: copy the entry verbatim.
                zout.writestr(item, self.zip.read(item.filename))
def get_android_manifest_axml(self):
    """
    Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
    """
    try:
        # Populated by _apk_analysis(); absent when analysis was skipped
        # or the manifest could not be parsed.
        return self.axml["AndroidManifest.xml"]
    except KeyError:
        return None

def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the AndroidManifest.xml file

    :rtype: :class:`~lxml.etree.Element`
    """
    try:
        return self.xml["AndroidManifest.xml"]
    except KeyError:
        return None

def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        # Parsed lazily and cached in self.arsc on first access.
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
    """Print a human-readable summary of the APK to stdout: files with
    type/CRC, declared and requested permissions, and all components
    with their intent filters."""
    # Ensure the file-type / CRC caches are populated before printing.
    self.get_files_types()
    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # File type unavailable for this entry; still print the CRC.
            print("\t", i, "%x" % self.files_crc32[i])
    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)
    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)
    print("MAIN ACTIVITY: ", self.get_main_activity())
    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")
    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")
    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")
    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """Convenience alias for :meth:`get_app_name`."""
    return self.get_app_name()

@property
def packagename(self):
    """Convenience alias for :meth:`get_package`."""
    return self.get_package()

@property
def version_name(self):
    """Convenience alias for :meth:`get_androidversion_name`."""
    return self.get_androidversion_name()

@property
def version_code(self):
    """Convenience alias for :meth:`get_androidversion_code`."""
    return self.get_androidversion_code()

@property
def icon_info(self):
    """Convenience alias for :meth:`get_app_icon` (the icon file name)."""
    return self.get_app_icon()

@property
def icon_data(self):
    """Raw bytes of the app icon file, or None if it cannot be read."""
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Zip entries may carry cp437-mangled names; retry with the
            # re-encoded name before giving up.
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            pass
    return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK.get_element | python | def get_element(self, tag_name, attribute, **attribute_filter):
for i in self.xml:
if self.xml[i] is None:
continue
tag = self.xml[i].findall('.//' + tag_name)
if len(tag) == 0:
return None
for item in tag:
skip_this_item = False
for attr, val in list(attribute_filter.items()):
attr_val = item.get(self._ns(attr))
if attr_val != val:
skip_this_item = True
break
if skip_this_item:
continue
value = item.get(self._ns(attribute))
if value is not None:
return value
return None | :Deprecated: use `get_attribute_value()` instead
Return element in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L652-L683 | [
"def _ns(self, name):\n \"\"\"\n return the name including the Android namespace\n \"\"\"\n return NS_ANDROID + name\n"
] | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
    """
    This class can access to all elements in an APK file

    example::

        APK("myfile.apk")
        APK(read("myfile.apk"), raw=True)

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param magic_file: specify the magic file (not used anymore - legacy only)
    :param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
    :param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
        Throw an exception on failure (default False)

    :type filename: string
    :type raw: boolean
    :type magic_file: string
    :type skip_analysis: boolean
    :type testzip: boolean
    """
    if magic_file:
        log.warning("You set magic_file but this parameter is actually unused. You should remove it.")
    self.filename = filename
    # Per-xml-file caches keyed by archive file name (e.g. "AndroidManifest.xml").
    self.xml = {}
    self.axml = {}
    self.arsc = {}
    # Manifest-derived state, filled in by _apk_analysis().
    self.package = ""
    self.androidversion = {}
    self.permissions = []
    self.uses_permissions = []
    self.declared_permissions = {}
    self.valid_apk = False
    # File type / CRC caches, filled lazily by get_files_types()/get_files_crc32().
    self._files = {}
    self.files_crc32 = {}
    if raw is True:
        self.__raw = bytearray(filename)
        self._sha256 = hashlib.sha256(self.__raw).hexdigest()
        # Set the filename to something sane
        self.filename = "raw_apk_sha256:{}".format(self._sha256)
    else:
        self.__raw = bytearray(read(filename))
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
    if testzip:
        # Test the zipfile for integrity before continuing.
        # This process might be slow, as the whole file is read.
        # Therefore it is possible to enable it as a separate feature.
        #
        # A short benchmark showed, that testing the zip takes about 10 times longer!
        # e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
        # testzip takes 0.1s!
        ret = self.zip.testzip()
        if ret is not None:
            # we could print the filename here, but there are zip which are so broken
            # That the filename is either very very long or does not make any sense.
            # Thus we do not do it, the user might find out by using other tools.
            raise BrokenAPKError("The APK is probably broken: testzip returned an error.")
    if not skip_analysis:
        self._apk_analysis()
def _ns(self, name):
    """Qualify *name* with the Android XML namespace prefix."""
    return NS_ANDROID + name
def _apk_analysis(self):
    """
    Run analysis on the APK file.

    This method is usually called by __init__ except if skip_analysis is False.
    It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
    extracted from the Manifest.
    """
    i = "AndroidManifest.xml"
    try:
        manifest_data = self.zip.read(i)
    except KeyError:
        log.warning("Missing AndroidManifest.xml. Is this an APK file?")
    else:
        ap = AXMLPrinter(manifest_data)
        if not ap.is_valid():
            log.error("Error while parsing AndroidManifest.xml - is the file valid?")
            return
        self.axml[i] = ap
        self.xml[i] = self.axml[i].get_xml_obj()
        if self.axml[i].is_packed():
            log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")
        if self.xml[i] is not None:
            # A valid manifest must have <manifest> as root element.
            if self.xml[i].tag != "manifest":
                log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
                return
            self.package = self.get_attribute_value("manifest", "package")
            self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
            self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
            # Requested permissions (dedup via set; order is not preserved).
            permission = list(self.get_all_attribute_value("uses-permission", "name"))
            self.permissions = list(set(self.permissions + permission))
            # Keep (name, maxSdkVersion) pairs as well, for implied-permission logic.
            for uses_permission in self.find_tags("uses-permission"):
                self.uses_permissions.append([
                    self.get_value_from_tag(uses_permission, "name"),
                    self._get_permission_maxsdk(uses_permission)
                ])
            # getting details of the declared permissions
            for d_perm_item in self.find_tags('permission'):
                d_perm_name = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "name")))
                d_perm_label = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "label")))
                d_perm_description = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "description")))
                d_perm_permissionGroup = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
                d_perm_protectionLevel = self._get_res_string_value(
                    str(self.get_value_from_tag(d_perm_item, "protectionLevel")))
                d_perm_details = {
                    "label": d_perm_label,
                    "description": d_perm_description,
                    "permissionGroup": d_perm_permissionGroup,
                    "protectionLevel": d_perm_protectionLevel,
                }
                self.declared_permissions[d_perm_name] = d_perm_details
            # Only now do we consider the APK successfully parsed.
            self.valid_apk = True
def __getstate__(self):
    """
    Function for pickling APK Objects.

    We remove the zip from the Object, as it is not pickable
    And it does not make any sense to pickle it anyways.

    :return: the picklable APK Object without zip.
    """
    # Work on a *copy* of the instance dict: the original code mutated
    # self.__dict__ directly, so pickling a live APK stripped its ``zip``
    # attribute and replaced ``axml``/``xml`` with strings as a side effect.
    x = self.__dict__.copy()
    x['axml'] = str(x['axml'])
    x['xml'] = str(x['xml'])
    del x['zip']
    return x
def __setstate__(self, state):
    """
    Load a pickled APK Object and restore the state

    We load the zip file back by reading __raw from the Object.

    :param state: pickled state
    """
    self.__dict__ = state
    # ``state`` still contains the name-mangled '_APK__raw' entry, so
    # self.__raw resolves after the dict assignment above.
    self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
    """
    Resolve a manifest string that may reference a string resource.

    Plain strings are returned unchanged; values of the form
    '@string/<key>' are looked up in the resource table (first package
    that yields a value wins). Returns '' when resolution fails.
    """
    prefix = '@string/'
    if not string.startswith(prefix):
        return string
    # Off-by-one fix: the original sliced ``string[9:]``, silently
    # dropping the first character of the key ('@string/' is 8 chars).
    string_key = string[len(prefix):]
    res_parser = self.get_android_resources()
    if not res_parser:
        return ''
    string_value = ''
    for package_name in res_parser.get_packages_names():
        extracted_values = res_parser.get_string(package_name, string_key)
        if extracted_values:
            string_value = extracted_values[1]
            break
    return string_value
def _get_permission_maxsdk(self, item):
    """
    Return the android:maxSdkVersion of a <uses-permission> tag as int,
    or None when the attribute is absent or not a valid integer.
    """
    value = self.get_value_from_tag(item, "maxSdkVersion")
    try:
        return int(value)
    except ValueError:
        # The original built the message via string concatenation of
        # get_max_sdk_version(), which raised TypeError when that value
        # was None (escaping this handler) and also logged the wrong
        # attribute; log the actual offending value lazily instead.
        log.warning("%s is not a valid value for <uses-permission> maxSdkVersion", value)
    except TypeError:
        # Attribute not present at all (int(None)).
        pass
    return None
def is_valid_APK(self):
    """
    Tell whether the APK could be parsed successfully.

    True means the AndroidManifest.xml was parsed; it says nothing about
    the signature nor about installability on an Android system.

    :rtype: boolean
    """
    return self.valid_apk

def get_filename(self):
    """
    Return the filename of the APK.

    :rtype: :class:`str`
    """
    return self.filename
def get_app_name(self):
    """
    Return the appname of the APK

    This name is read from the AndroidManifest.xml
    using the application android:label.
    If no label exists, the android:label of the main activity is used.

    If there is also no main activity label, an empty string is returned.

    :rtype: :class:`str`
    """
    app_name = self.get_attribute_value('application', 'label')
    if app_name is None:
        # Fall back to the label of the main activity.
        activities = self.get_main_activities()
        main_activity_name = None
        if len(activities) > 0:
            main_activity_name = activities.pop()
        app_name = self.get_attribute_value(
            'activity', 'label', name=main_activity_name
        )
    if app_name is None:
        # No App name set
        # TODO return packagename instead?
        log.warning("It looks like that no app name is set for the main activity!")
        return ""
    if app_name.startswith("@"):
        # The label is a resource reference; resolve it via resources.arsc.
        res_parser = self.get_android_resources()
        if not res_parser:
            # TODO: What should be the correct return value here?
            return app_name
        res_id, package = res_parser.parse_id(app_name)
        # If the package name is the same as the APK package,
        # we should be able to resolve the ID.
        if package and package != self.get_package():
            if package == 'android':
                # TODO: we can not resolve this, as we lack framework-res.apk
                # one exception would be when parsing framework-res.apk directly.
                log.warning("Resource ID with android package name encountered! "
                            "Will not resolve, framework-res.apk would be required.")
                return app_name
            else:
                # TODO should look this up, might be in the resources
                log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
                return app_name
        try:
            app_name = res_parser.get_resolved_res_configs(
                res_id,
                ARSCResTableConfig.default_config())[0][1]
        except Exception as e:
            log.warning("Exception selecting app name: %s" % e)
    return app_name
def get_app_icon(self, max_dpi=65536):
    """
    Return the first icon file name, which density is not greater than max_dpi,
    unless exact icon resolution is set in the manifest, in which case
    return the exact file.

    This information is read from the AndroidManifest.xml

    From https://developer.android.com/guide/practices/screens_support.html
    and https://developer.android.com/ndk/reference/group___configuration.html

    * DEFAULT                             0dpi
    * ldpi (low)                        120dpi
    * mdpi (medium)                     160dpi
    * TV                                213dpi
    * hdpi (high)                       240dpi
    * xhdpi (extra-high)                320dpi
    * xxhdpi (extra-extra-high)         480dpi
    * xxxhdpi (extra-extra-extra-high)  640dpi
    * anydpi                          65534dpi (0xFFFE)
    * nodpi                           65535dpi (0xFFFF)

    There is a difference between nodpi and anydpi:
    nodpi will be used if no other density is specified. Or the density does not match.
    nodpi is the fallback for everything else. If there is a resource that matches the DPI,
    this is used.
    anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
    Therefore anydpi is usually used with vector graphics and with constraints on the API level.
    For example adaptive icons are usually marked as anydpi.

    When it comes now to selecting an icon, there is the following flow:

    1) is there an anydpi icon?
    2) is there an icon for the dpi of the device?
    3) is there a nodpi icon?
    4) (only on very old devices) is there a icon with dpi 0 (the default)

    For more information read here: https://stackoverflow.com/a/34370735/446140

    :rtype: :class:`str`
    """
    # Icon lookup order: main-activity icon, application icon, then the
    # conventional ic_launcher resources (mipmap before drawable).
    main_activity_name = self.get_main_activity()
    app_icon = self.get_attribute_value(
        'activity', 'icon', name=main_activity_name)
    if not app_icon:
        app_icon = self.get_attribute_value('application', 'icon')
    res_parser = self.get_android_resources()
    if not res_parser:
        # Can not do anything below this point to resolve...
        return None
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id
    if not app_icon:
        res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
        if res_id:
            app_icon = "@%x" % res_id
    if not app_icon:
        # If the icon can not be found, return now
        return None
    if app_icon.startswith("@"):
        # Resource reference: pick the candidate with the highest density
        # that does not exceed max_dpi.
        res_id = int(app_icon[1:], 16)
        candidates = res_parser.get_resolved_res_configs(res_id)
        app_icon = None
        current_dpi = -1
        try:
            for config, file_name in candidates:
                dpi = config.get_density()
                if current_dpi < dpi <= max_dpi:
                    app_icon = file_name
                    current_dpi = dpi
        except Exception as e:
            log.warning("Exception selecting app icon: %s" % e)
    return app_icon
def get_package(self):
    """Return the package name read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    return self.package

def get_androidversion_code(self):
    """Return the android versionCode read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    return self.androidversion["Code"]

def get_androidversion_name(self):
    """Return the android versionName read from the AndroidManifest.xml.

    :rtype: :class:`str`
    """
    return self.androidversion["Name"]

def get_files(self):
    """Return the file names inside the APK archive.

    :rtype: a list of :class:`str`
    """
    return self.zip.namelist()
def _get_file_magic_name(self, buffer):
    """
    Return the filetype guessed for a buffer

    :param buffer: bytes
    :return: str of filetype
    """
    default = "Unknown"
    ftype = None
    try:
        # Magic is optional
        import magic
    except ImportError:
        return default
    try:
        # There are several implementations of magic,
        # unfortunately all called magic
        # We use this one: https://github.com/ahupp/python-magic/
        getattr(magic, "MagicException")
    except AttributeError:
        # Looks like no magic was installed
        return default
    try:
        # Only the first 1 KiB is needed for type detection.
        ftype = magic.from_buffer(buffer[:1024])
    except magic.MagicError as e:
        log.exception("Error getting the magic type!")
        return default
    if not ftype:
        return default
    else:
        # Correct known misdetections (e.g. APKs reported as plain Zip).
        return self._patch_magic(buffer, ftype)

@property
def files(self):
    """
    Returns a dictionary of filenames and detected magic type

    :return: dictionary of files and their mime type
    """
    return self.get_files_types()
def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    Results are computed once and cached in ``self._files``; the CRC32
    cache is populated as a side effect.

    :rtype: a dictionnary
    """
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)
    return self._files
def _patch_magic(self, buffer, orig):
    """
    Correct likely-wrong detections from mime libraries.

    Zip/JAR guesses are re-checked: if the buffer actually looks like an
    APK, it is reported as an Android package instead.

    :param buffer: bytes of the file to detect
    :param orig: guess by mime library
    :return: corrected guess
    """
    looks_like_zip = ("Zip" in orig) or ('(JAR)' in orig)
    if looks_like_zip and is_android_raw(buffer) == "APK":
        return "Android application package file"
    return orig
def get_files_crc32(self):
    """
    Calculate (once) and return a mapping of filename to CRC32.

    :return: dict of filename: CRC32
    """
    if not self.files_crc32:
        for name in self.get_files():
            self.files_crc32[name] = crc32(self.zip.read(name))
    return self.files_crc32

def get_files_information(self):
    """
    Yield (filename, detected type, crc32) for every file in the APK.

    :rtype: str, str, int
    """
    for name in self.get_files():
        yield name, self.get_files_types()[name], self.get_files_crc32()[name]
def get_raw(self):
    """
    Return raw bytes of the APK

    :rtype: bytes
    """
    # NOTE(review): this is actually the bytearray created in __init__,
    # not an immutable bytes object — confirm callers do not rely on that.
    return self.__raw

def get_file(self, filename):
    """
    Return the raw data of the specified filename
    inside the APK

    :raises FileNotPresent: when the archive contains no such entry
    :rtype: bytes
    """
    try:
        return self.zip.read(filename)
    except KeyError:
        # zipfile raises KeyError for unknown names; translate it into
        # the package-level exception.
        raise FileNotPresent(filename)
def get_dex(self):
    """
    Return the raw data of the classes dex file.

    This will give you the data of the file called `classes.dex`
    inside the APK. If the APK has multiple DEX files, you need to
    use :func:`~APK.get_all_dex`. Returns ``b""`` when no classes.dex
    exists.

    :rtype: bytes
    """
    try:
        return self.get_file("classes.dex")
    except FileNotPresent:
        # Return the documented bytes type: the original returned the
        # str "" here, contradicting its own :rtype: and forcing callers
        # to handle two types. Both values are falsy, so truthiness
        # checks behave the same.
        return b""
def get_dex_names(self):
    """
    Return the names of all "official" DEX files in the APK root,
    i.e. classes.dex and classes<N>.dex.

    :rtype: a list of str
    """
    # Anchor the pattern and escape the dot: the original "classes(\d*).dex"
    # treated '.' as a wildcard and had no end anchor, so names such as
    # "classesXdex" or "classes.dexfoo" were wrongly included. Also return
    # a real list, matching the documented :rtype: (the original returned
    # a lazy filter object).
    dexre = re.compile(r"^classes(\d*)\.dex$")
    return [name for name in self.get_files() if dexre.match(name)]
def get_all_dex(self):
    """
    Yield the raw data of every classes dex file.

    :rtype: a generator of bytes
    """
    for name in self.get_dex_names():
        yield self.get_file(name)
def is_multidex(self):
    """
    Test if the APK has multiple DEX files.

    :return: True if multiple dex found, otherwise False
    """
    # Escape the dot: the original "^classes(\d+)?.dex$" treated '.' as a
    # wildcard, so files like "classesXdex" were wrongly counted as DEX.
    dexre = re.compile(r"^classes(\d+)?\.dex$")
    return sum(1 for name in self.get_files() if dexre.search(name)) > 1
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    Deprecated: use `get_all_attribute_value()` instead.

    Yield (formatted) values of *attribute* for elements matching
    *tag_name* across all parsed xml files.

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    # The original decorated this method with ``@DeprecationWarning``,
    # which replaces the function with an exception *instance* and makes
    # it uncallable; emit a proper runtime warning instead.
    import warnings
    warnings.warn(
        "get_elements() is deprecated, use get_all_attribute_value() instead",
        DeprecationWarning, stacklevel=2)
    for i in self.xml:
        if self.xml[i] is None:
            continue
        for item in self.xml[i].findall('.//' + tag_name):
            if with_namespace:
                value = item.get(self._ns(attribute))
            else:
                value = item.get(attribute)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
def _format_value(self, value):
    """
    Qualify *value* with the package name when it uses manifest
    shorthand, e.g. ".Main" or "Main" -> "com.pkg.Main"; values that
    already contain a dot elsewhere are returned unchanged.

    :param value: attribute value to normalize
    :return: the fully qualified value
    """
    if len(value) > 0:
        if value[0] == ".":
            # Leading dot: relative class name.
            value = self.package + value
        elif value.find(".") == -1:
            # Bare name without any dot: qualify with the package.
            value = self.package + "." + value
        # The original also handled find(".") == 0 separately, but that
        # branch was unreachable: value[0] == "." is caught above.
    return value
def get_all_attribute_value(
    self, tag_name, attribute, format_value=True, **attribute_filter
):
    """
    Yield all values of *attribute* on tags named *tag_name* across every
    parsed xml file, preferring the plain attribute over the namespaced one.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    # The original carried a spurious ``@DeprecationWarning`` decorator,
    # which turns the class attribute into an exception instance and
    # breaks every caller — including get_attribute_value() in this very
    # class. The decorator is removed; the method is not deprecated.
    tags = self.find_tags(tag_name, **attribute_filter)
    for tag in tags:
        value = tag.get(attribute) or tag.get(self._ns(attribute))
        if value is not None:
            if format_value:
                yield self._format_value(value)
            else:
                yield value
def get_attribute_value(
    self, tag_name, attribute, format_value=False, **attribute_filter
):
    """
    Return the first non-None value of *attribute* on a matching
    *tag_name* tag, or None when no tag carries it.

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :param format_value: specify if the value needs to be formatted with packagename
    :type format_value: boolean
    """
    values = self.get_all_attribute_value(
        tag_name, attribute, format_value, **attribute_filter)
    for value in values:
        if value is not None:
            return value

def get_value_from_tag(self, tag, attribute):
    """
    Return the value of *attribute* on a specific tag, preferring the
    android-namespaced variant over the plain one.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # TODO: figure out if both android:name and name tag exist which one to give preference
    value = tag.get(self._ns(attribute))
    if value is None:
        log.warning("Failed to get the attribute with namespace")
        value = tag.get(attribute)
    return value
def find_tags(self, tag_name, **attribute_filter):
    """
    Return a list of all the matched tags in all available xml

    :param tag: specify the tag name
    :type tag: string
    """
    all_tags = [
        self.find_tags_from_xml(
            i, tag_name, **attribute_filter
        )
        for i in self.xml
    ]
    # Flatten the per-file result lists into one list.
    return [tag for tag_list in all_tags for tag in tag_list]

def find_tags_from_xml(
    self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    # The root element itself may be the requested tag; findall() below
    # only matches descendants.
    if xml.tag == tag_name:
        # NOTE(review): passes xml.tag (a str) rather than the element to
        # is_tag_matched — harmless only when the filter is empty; confirm.
        if self.is_tag_matched(
            xml.tag, **attribute_filter
        ):
            return [xml]
        return []
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags if self.is_tag_matched(
            tag, **attribute_filter
        )
    ]
def is_tag_matched(self, tag, **attribute_filter):
    """
    Return True when *tag* carries every attribute in *attribute_filter*
    with the expected value. An empty filter matches everything.

    :param tag: specify the tag element
    :type tag: Element
    :param attribute: specify the attribute
    :type attribute: string
    """
    # Idiomatic emptiness test instead of ``len(attribute_filter) <= 0``.
    if not attribute_filter:
        return True
    for attr, value in attribute_filter.items():
        # TODO: figure out if both android:name and name tag exist which one to give preference
        _value = tag.get(self._ns(attr))
        if _value is None:
            log.warning("Failed to get the attribute with namespace")
            _value = tag.get(attr)
        if _value != value:
            return False
    return True
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml

    :rtype: a set of str
    """
    x = set()  # activities declaring the MAIN action
    y = set()  # activities declaring the LAUNCHER category
    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
                                 self.xml[i].findall(".//activity-alias")
        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue
            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')
            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')
    # A "main" activity must have both the MAIN action and the LAUNCHER category.
    return x.intersection(y)
def get_main_activity(self):
"""
Return the name of the main activity
This value is read from the AndroidManifest.xml
:rtype: str
"""
activities = self.get_main_activities()
if len(activities) > 0:
return self._format_value(activities.pop())
return None
def get_activities(self):
    """
    Return the ``android:name`` attribute of every declared activity.

    :rtype: a list of str
    """
    return [name for name in self.get_all_attribute_value("activity", "name")]
def get_services(self):
    """
    Return the ``android:name`` attribute of every declared service.

    :rtype: a list of str
    """
    return [name for name in self.get_all_attribute_value("service", "name")]
def get_receivers(self):
    """
    Return the ``android:name`` attribute of every declared receiver.

    :rtype: a list of string
    """
    return [name for name in self.get_all_attribute_value("receiver", "name")]
def get_providers(self):
    """
    Return the ``android:name`` attribute of every declared provider.

    :rtype: a list of string
    """
    return [name for name in self.get_all_attribute_value("provider", "name")]
def get_intent_filters(self, itemtype, name):
    """
    Find the intent filters attached to a given item.

    Intent filters are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a
    dictionary of all attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    result = {"action": [], "category": []}
    for xml_name in self.xml:
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[xml_name].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) != name:
                continue
            for intent_filter in item.findall(".//intent-filter"):
                for kind in ("action", "category"):
                    for child in intent_filter.findall(kind):
                        child_name = child.get(self._ns("name"))
                        # Keep each name only once, preserving order.
                        if child_name not in result[kind]:
                            result[kind].append(child_name)
    # Drop keys that stayed empty.
    for kind in ("action", "category"):
        if not result[kind]:
            del result[kind]
    return result
def get_permissions(self):
    """
    Return the permission names declared in the AndroidManifest.xml.

    The list is reported verbatim, so duplicates are possible. Implied
    permissions, which are granted automatically, are not included --
    use :meth:`get_uses_implied_permission_list` for those.

    :returns: A list of permissions
    :rtype: list
    """
    return self.permissions
def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or by other
    requested permissions.

    :rtype: list of [permission_name, maxSdkVersion] pairs
    """
    target_sdk = self.get_effective_target_sdk_version()
    perms = self.permissions
    implied = []

    # Apps targeting SDK < 4 implicitly receive storage and phone-state access.
    write_storage_implied = False
    if target_sdk < 4:
        if 'android.permission.WRITE_EXTERNAL_STORAGE' not in perms:
            implied.append(['android.permission.WRITE_EXTERNAL_STORAGE', None])
            write_storage_implied = True
        if 'android.permission.READ_PHONE_STATE' not in perms:
            implied.append(['android.permission.READ_PHONE_STATE', None])

    # WRITE_EXTERNAL_STORAGE (explicit or implied) also grants read access,
    # inheriting the same maxSdkVersion restriction if one was declared.
    if ('android.permission.WRITE_EXTERNAL_STORAGE' in perms or write_storage_implied) \
            and 'android.permission.READ_EXTERNAL_STORAGE' not in perms:
        max_sdk = next(
            (version for name, version in self.uses_permissions
             if name == 'android.permission.WRITE_EXTERNAL_STORAGE'),
            None)
        implied.append(['android.permission.READ_EXTERNAL_STORAGE', max_sdk])

    # Before SDK 16, contact access implied call-log access.
    if target_sdk < 16:
        if 'android.permission.READ_CONTACTS' in perms \
                and 'android.permission.READ_CALL_LOG' not in perms:
            implied.append(['android.permission.READ_CALL_LOG', None])
        if 'android.permission.WRITE_CONTACTS' in perms \
                and 'android.permission.WRITE_CALL_LOG' not in perms:
            implied.append(['android.permission.WRITE_CALL_LOG', None])
    return implied
def get_details_permissions(self):
    """
    Return the requested permissions together with their details.

    Permissions unknown to the bundled android reference are reported
    with a "normal" protection level and a placeholder text.

    :rtype: dict of {permission: [protectionLevel, label, description]}
    """
    details = {}
    for permission in self.permissions:
        if permission in self.permission_module:
            info = self.permission_module[permission]
            details[permission] = [
                info["protectionLevel"], info["label"], info["description"]
            ]
        else:
            # FIXME: the permission might be signature, if it is defined by the app itself!
            details[permission] = [
                "normal",
                "Unknown permission from android reference",
                "Unknown permission from android reference",
            ]
    return details
def get_requested_permissions(self):
    """
    Return all requested permissions.

    It has the same result as :meth:`get_permissions` and might be
    removed in the future.

    .. deprecated:: use :meth:`get_permissions` instead.

    :rtype: list of str
    """
    import warnings
    # The former ``@DeprecationWarning`` decorator replaced this method with
    # a non-callable exception instance, so calling it raised TypeError.
    # Emit a proper deprecation warning at call time instead.
    warnings.warn(
        "get_requested_permissions() is deprecated, use get_permissions()",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.get_permissions()
def get_requested_aosp_permissions(self):
    """
    Return the requested permissions that are declared within the AOSP
    project (this includes several permissions from platform apps too).

    :rtype: list of str
    """
    # Dict membership is O(1); the previous ``perm in list(d.keys())``
    # rebuilt the key list for every permission.
    return [perm for perm in self.get_permissions()
            if perm in self.permission_module]
def get_requested_aosp_permissions_details(self):
    """
    Return the requested AOSP permissions together with their details.

    Permissions that are unknown to the android reference are silently
    skipped.

    :rtype: dictionary
    """
    details = {}
    for permission in self.permissions:
        if permission in self.permission_module:
            details[permission] = self.permission_module[permission]
    return details
def get_requested_third_party_permissions(self):
    """
    Return the requested permissions that are NOT declared within the
    AOSP project.

    :rtype: list of strings
    """
    # Dict membership is O(1); the previous ``perm not in list(d.keys())``
    # rebuilt the key list for every permission.
    return [perm for perm in self.get_permissions()
            if perm not in self.permission_module]
def get_declared_permissions(self):
    """
    Return the names of the permissions declared by the app itself.

    :rtype: list of strings
    """
    return [name for name in self.declared_permissions]
def get_declared_permissions_details(self):
    """
    Return the permissions declared by the app together with their details.

    :rtype: dict
    """
    return self.declared_permissions
def get_max_sdk_version(self):
    """
    Return the ``android:maxSdkVersion`` attribute of the uses-sdk tag.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "maxSdkVersion")
def get_min_sdk_version(self):
    """
    Return the ``android:minSdkVersion`` attribute of the uses-sdk tag.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "minSdkVersion")
def get_target_sdk_version(self):
    """
    Return the ``android:targetSdkVersion`` attribute of the uses-sdk tag.

    :rtype: string
    """
    return self.get_attribute_value("uses-sdk", "targetSdkVersion")
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion, always an int > 0.

    If targetSdkVersion is not set, minSdkVersion is used instead; if
    neither parses as an integer the value defaults to 1, as defined in:
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    declared = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(declared)
    except (ValueError, TypeError):
        return 1
def get_libraries(self):
    """
    Return the ``android:name`` attributes of all uses-library tags.

    :rtype: list
    """
    return [name for name in self.get_all_attribute_value("uses-library", "name")]
def get_features(self):
    """
    Return all ``android:name`` values found for uses-feature tags
    in the AndroidManifest.xml.

    :return: list
    """
    return [name for name in self.get_all_attribute_value("uses-feature", "name")]
def is_wearable(self):
    """
    Check whether this application is built for wearables by testing
    for the ``android.hardware.type.watch`` feature.

    See https://developer.android.com/training/wearables/apps/creating.html
    for more information. Not every app sets this feature (not even the
    example Google provides), so do not rely on it 100%.

    :return: True if wearable, False otherwise
    """
    features = self.get_features()
    return 'android.hardware.type.watch' in features
def is_leanback(self):
    """
    Check whether this application is built for TV (Leanback support)
    by testing for the ``android.software.leanback`` feature.

    :return: True if the leanback feature is used, False otherwise
    """
    features = self.get_features()
    return 'android.software.leanback' in features
def is_androidtv(self):
    """
    Check whether this application does not require a touchscreen,
    which is the rule to get into the TV section of the Play Store.

    See https://developer.android.com/training/tv/start/start.html
    for more information.

    :return: True if 'android.hardware.touchscreen' is not required, False otherwise
    """
    value = self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen"
    )
    return value == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files=None):
    """
    Create a new zip file based on the current APK contents.

    :param filename: the output filename (or file-like object) of the zip
    :param deleted_files: a regex pattern; matching entries are dropped
    :param new_files: a dictionary of replacement files
    :type filename: string
    :type deleted_files: None or a string
    :type new_files: a dictionary (key: filename, value: content of the file)
    """
    # The default used to be the mutable literal ``{}``, which is shared
    # between all calls; normalize a missing argument to a fresh dict.
    if new_files is None:
        new_files = {}
    # ``with`` guarantees the archive is finalized even on error.
    with zipfile.ZipFile(filename, 'w') as zout:
        for item in self.zip.infolist():
            # Block one: deleted_files, or deleted_files and new_files
            if deleted_files is not None:
                if re.match(deleted_files, item.filename) is None:
                    # the regex of deleted_files doesn't match the filename
                    if new_files is not False:
                        if item.filename in new_files:
                            # and the filename is in new_files
                            zout.writestr(item, new_files[item.filename])
                            continue
                    # Otherwise, write the original file.
                    buffer = self.zip.read(item.filename)
                    zout.writestr(item, buffer)
            # Block two: deleted_files is None, new_files is not empty
            elif new_files is not False:
                if item.filename in new_files:
                    zout.writestr(item, new_files[item.filename])
                else:
                    buffer = self.zip.read(item.filename)
                    zout.writestr(item, buffer)
            # Block three: deleted_files is None and new_files was
            # explicitly passed as False -- copy the zip verbatim.
            else:
                buffer = self.zip.read(item.filename)
                zout.writestr(item, buffer)
def get_android_manifest_axml(self):
    """
    Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`, or None if missing
    """
    # dict.get() yields None for a missing manifest, like the old
    # try/except KeyError did.
    return self.axml.get("AndroidManifest.xml")
def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the AndroidManifest.xml file

    :rtype: :class:`~lxml.etree.Element`, or None if missing
    """
    # dict.get() yields None for a missing manifest, like the old
    # try/except KeyError did.
    return self.xml.get("AndroidManifest.xml")
def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`, or None
        if the APK contains no resources.arsc at all
    """
    if "resources.arsc" in self.arsc:
        return self.arsc["resources.arsc"]
    if "resources.arsc" not in self.zip.namelist():
        # Rare case: no resource file was supplied at all.
        # Maybe it was added manually, thus we check here.
        return None
    # Parse lazily and cache the parser for subsequent calls.
    self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
    return self.arsc["resources.arsc"]
def show(self):
    """
    Print a human readable summary of the APK to stdout: contained
    files, declared and requested permissions, and the components
    (activities, services, receivers, providers) with their intent
    filters.
    """
    self.get_files_types()

    print("FILES: ")
    for entry in self.get_files():
        try:
            print("\t", entry, self._files[entry], "%x" % self.files_crc32[entry])
        except KeyError:
            # no detected file type available for this entry
            print("\t", entry, "%x" % self.files_crc32[entry])

    print("DECLARED PERMISSIONS:")
    for permission in self.get_declared_permissions():
        print("\t", permission)

    print("REQUESTED PERMISSIONS:")
    for permission in self.get_permissions():
        print("\t", permission)

    print("MAIN ACTIVITY: ", self.get_main_activity())

    # Each component type is printed together with its intent filters.
    for label, itemtype, getter in (
            ("ACTIVITIES: ", "activity", self.get_activities),
            ("SERVICES: ", "service", self.get_services),
            ("RECEIVERS: ", "receiver", self.get_receivers)):
        print(label)
        for name in getter():
            filters = self.get_intent_filters(itemtype, name)
            print("\t", name, filters or "")

    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """The application name (see :meth:`get_app_name`)."""
    return self.get_app_name()
@property
def packagename(self):
    """The package name (see :meth:`get_package`)."""
    return self.get_package()
@property
def version_name(self):
    """The android version name (see :meth:`get_androidversion_name`)."""
    return self.get_androidversion_name()
@property
def version_code(self):
    """The android version code (see :meth:`get_androidversion_code`)."""
    return self.get_androidversion_code()
@property
def icon_info(self):
    """The app icon file name inside the APK (see :meth:`get_app_icon`)."""
    return self.get_app_icon()
@property
def icon_data(self):
    """
    The raw bytes of the app icon, or None if it cannot be extracted.

    The icon path is tried verbatim first; on failure it is retried
    after a cp437 re-decode (zip filename encoding quirk).
    """
    icon_path = self.get_app_icon()
    try:
        return self.get_file(icon_path)
    except FileNotPresent:
        pass
    try:
        return self.get_file(icon_path.encode().decode('cp437'))
    except FileNotPresent:
        return None
|
appknox/pyaxmlparser | pyaxmlparser/core.py | APK.is_tag_matched | python | def is_tag_matched(self, tag, **attribute_filter):
if len(attribute_filter) <= 0:
return True
for attr, value in attribute_filter.items():
# TODO: figure out if both android:name and name tag exist which one to give preference
_value = tag.get(self._ns(attr))
if _value is None:
log.warning("Failed to get the attribute with namespace")
_value = tag.get(attr)
if _value != value:
return False
return True | Return true if the attributes matches in attribute filter
:param tag: specify the tag element
:type tag: Element
:param attribute: specify the attribute
:type attribute: string | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L780-L798 | [
"def _ns(self, name):\n \"\"\"\n return the name including the Android namespace\n \"\"\"\n return NS_ANDROID + name\n"
] | class APK(object):
def __init__(self, filename, raw=False, magic_file=None, skip_analysis=False, testzip=False):
"""
This class can access to all elements in an APK file
example::
APK("myfile.apk")
APK(read("myfile.apk"), raw=True)
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param magic_file: specify the magic file (not used anymore - legacy only)
:param skip_analysis: Skip the analysis, e.g. no manifest files are read. (default: False)
:param testzip: Test the APK for integrity, e.g. if the ZIP file is broken.
Throw an exception on failure (default False)
:type filename: string
:type raw: boolean
:type magic_file: string
:type skip_analysis: boolean
:type testzip: boolean
"""
if magic_file:
log.warning("You set magic_file but this parameter is actually unused. You should remove it.")
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.uses_permissions = []
self.declared_permissions = {}
self.valid_apk = False
self._files = {}
self.files_crc32 = {}
if raw is True:
self.__raw = bytearray(filename)
self._sha256 = hashlib.sha256(self.__raw).hexdigest()
# Set the filename to something sane
self.filename = "raw_apk_sha256:{}".format(self._sha256)
else:
self.__raw = bytearray(read(filename))
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
if testzip:
# Test the zipfile for integrity before continuing.
# This process might be slow, as the whole file is read.
# Therefore it is possible to enable it as a separate feature.
#
# A short benchmark showed, that testing the zip takes about 10 times longer!
# e.g. normal zip loading (skip_analysis=True) takes about 0.01s, where
# testzip takes 0.1s!
ret = self.zip.testzip()
if ret is not None:
# we could print the filename here, but there are zip which are so broken
# That the filename is either very very long or does not make any sense.
# Thus we do not do it, the user might find out by using other tools.
raise BrokenAPKError("The APK is probably broken: testzip returned an error.")
if not skip_analysis:
self._apk_analysis()
def _ns(self, name):
"""
return the name including the Android namespace
"""
return NS_ANDROID + name
def _apk_analysis(self):
"""
Run analysis on the APK file.
This method is usually called by __init__ except if skip_analysis is False.
It will then parse the AndroidManifest.xml and set all fields in the APK class which can be
extracted from the Manifest.
"""
i = "AndroidManifest.xml"
try:
manifest_data = self.zip.read(i)
except KeyError:
log.warning("Missing AndroidManifest.xml. Is this an APK file?")
else:
ap = AXMLPrinter(manifest_data)
if not ap.is_valid():
log.error("Error while parsing AndroidManifest.xml - is the file valid?")
return
self.axml[i] = ap
self.xml[i] = self.axml[i].get_xml_obj()
if self.axml[i].is_packed():
log.warning("XML Seems to be packed, operations on the AndroidManifest.xml might fail.")
if self.xml[i] is not None:
if self.xml[i].tag != "manifest":
log.error("AndroidManifest.xml does not start with a <manifest> tag! Is this a valid APK?")
return
self.package = self.get_attribute_value("manifest", "package")
self.androidversion["Code"] = self.get_attribute_value("manifest", "versionCode")
self.androidversion["Name"] = self.get_attribute_value("manifest", "versionName")
permission = list(self.get_all_attribute_value("uses-permission", "name"))
self.permissions = list(set(self.permissions + permission))
for uses_permission in self.find_tags("uses-permission"):
self.uses_permissions.append([
self.get_value_from_tag(uses_permission, "name"),
self._get_permission_maxsdk(uses_permission)
])
# getting details of the declared permissions
for d_perm_item in self.find_tags('permission'):
d_perm_name = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "name")))
d_perm_label = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "label")))
d_perm_description = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "description")))
d_perm_permissionGroup = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "permissionGroup")))
d_perm_protectionLevel = self._get_res_string_value(
str(self.get_value_from_tag(d_perm_item, "protectionLevel")))
d_perm_details = {
"label": d_perm_label,
"description": d_perm_description,
"permissionGroup": d_perm_permissionGroup,
"protectionLevel": d_perm_protectionLevel,
}
self.declared_permissions[d_perm_name] = d_perm_details
self.valid_apk = True
def __getstate__(self):
"""
Function for pickling APK Objects.
We remove the zip from the Object, as it is not pickable
And it does not make any sense to pickle it anyways.
:return: the picklable APK Object without zip.
"""
# Upon pickling, we need to remove the ZipFile
x = self.__dict__
x['axml'] = str(x['axml'])
x['xml'] = str(x['xml'])
del x['zip']
return x
def __setstate__(self, state):
"""
Load a pickled APK Object and restore the state
We load the zip file back by reading __raw from the Object.
:param state: pickled state
"""
self.__dict__ = state
self.zip = zipfile.ZipFile(io.BytesIO(self.__raw), mode="r")
def _get_res_string_value(self, string):
if not string.startswith('@string/'):
return string
string_key = string[9:]
res_parser = self.get_android_resources()
if not res_parser:
return ''
string_value = ''
for package_name in res_parser.get_packages_names():
extracted_values = res_parser.get_string(package_name, string_key)
if extracted_values:
string_value = extracted_values[1]
break
return string_value
def _get_permission_maxsdk(self, item):
maxSdkVersion = None
try:
maxSdkVersion = int(self.get_value_from_tag(item, "maxSdkVersion"))
except ValueError:
log.warning(self.get_max_sdk_version() + 'is not a valid value for <uses-permission> maxSdkVersion')
except TypeError:
pass
return maxSdkVersion
def is_valid_APK(self):
"""
Return true if the APK is valid, false otherwise.
An APK is seen as valid, if the AndroidManifest.xml could be successful parsed.
This does not mean that the APK has a valid signature nor that the APK
can be installed on an Android system.
:rtype: boolean
"""
return self.valid_apk
def get_filename(self):
"""
Return the filename of the APK
:rtype: :class:`str`
"""
return self.filename
def get_app_name(self):
"""
Return the appname of the APK
This name is read from the AndroidManifest.xml
using the application android:label.
If no label exists, the android:label of the main activity is used.
If there is also no main activity label, an empty string is returned.
:rtype: :class:`str`
"""
app_name = self.get_attribute_value('application', 'label')
if app_name is None:
activities = self.get_main_activities()
main_activity_name = None
if len(activities) > 0:
main_activity_name = activities.pop()
app_name = self.get_attribute_value(
'activity', 'label', name=main_activity_name
)
if app_name is None:
# No App name set
# TODO return packagename instead?
log.warning("It looks like that no app name is set for the main activity!")
return ""
if app_name.startswith("@"):
res_parser = self.get_android_resources()
if not res_parser:
# TODO: What should be the correct return value here?
return app_name
res_id, package = res_parser.parse_id(app_name)
# If the package name is the same as the APK package,
# we should be able to resolve the ID.
if package and package != self.get_package():
if package == 'android':
# TODO: we can not resolve this, as we lack framework-res.apk
# one exception would be when parsing framework-res.apk directly.
log.warning("Resource ID with android package name encountered! "
"Will not resolve, framework-res.apk would be required.")
return app_name
else:
# TODO should look this up, might be in the resources
log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
return app_name
try:
app_name = res_parser.get_resolved_res_configs(
res_id,
ARSCResTableConfig.default_config())[0][1]
except Exception as e:
log.warning("Exception selecting app name: %s" % e)
return app_name
def get_app_icon(self, max_dpi=65536):
"""
Return the first icon file name, which density is not greater than max_dpi,
unless exact icon resolution is set in the manifest, in which case
return the exact file.
This information is read from the AndroidManifest.xml
From https://developer.android.com/guide/practices/screens_support.html
and https://developer.android.com/ndk/reference/group___configuration.html
* DEFAULT 0dpi
* ldpi (low) 120dpi
* mdpi (medium) 160dpi
* TV 213dpi
* hdpi (high) 240dpi
* xhdpi (extra-high) 320dpi
* xxhdpi (extra-extra-high) 480dpi
* xxxhdpi (extra-extra-extra-high) 640dpi
* anydpi 65534dpi (0xFFFE)
* nodpi 65535dpi (0xFFFF)
There is a difference between nodpi and anydpi:
nodpi will be used if no other density is specified. Or the density does not match.
nodpi is the fallback for everything else. If there is a resource that matches the DPI,
this is used.
anydpi is also valid for all densities but in this case, anydpi will overrule all other files!
Therefore anydpi is usually used with vector graphics and with constraints on the API level.
For example adaptive icons are usually marked as anydpi.
When it comes now to selecting an icon, there is the following flow:
1) is there an anydpi icon?
2) is there an icon for the dpi of the device?
3) is there a nodpi icon?
4) (only on very old devices) is there a icon with dpi 0 (the default)
For more information read here: https://stackoverflow.com/a/34370735/446140
:rtype: :class:`str`
"""
main_activity_name = self.get_main_activity()
app_icon = self.get_attribute_value(
'activity', 'icon', name=main_activity_name)
if not app_icon:
app_icon = self.get_attribute_value('application', 'icon')
res_parser = self.get_android_resources()
if not res_parser:
# Can not do anything below this point to resolve...
return None
if not app_icon:
res_id = res_parser.get_res_id_by_key(self.package, 'mipmap', 'ic_launcher')
if res_id:
app_icon = "@%x" % res_id
if not app_icon:
res_id = res_parser.get_res_id_by_key(self.package, 'drawable', 'ic_launcher')
if res_id:
app_icon = "@%x" % res_id
if not app_icon:
# If the icon can not be found, return now
return None
if app_icon.startswith("@"):
res_id = int(app_icon[1:], 16)
candidates = res_parser.get_resolved_res_configs(res_id)
app_icon = None
current_dpi = -1
try:
for config, file_name in candidates:
dpi = config.get_density()
if current_dpi < dpi <= max_dpi:
app_icon = file_name
current_dpi = dpi
except Exception as e:
log.warning("Exception selecting app icon: %s" % e)
return app_icon
def get_package(self):
"""
Return the name of the package
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.package
def get_androidversion_code(self):
"""
Return the android version code
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.androidversion["Code"]
def get_androidversion_name(self):
"""
Return the android version name
This information is read from the AndroidManifest.xml
:rtype: :class:`str`
"""
return self.androidversion["Name"]
def get_files(self):
"""
Return the file names inside the APK.
:rtype: a list of :class:`str`
"""
return self.zip.namelist()
def _get_file_magic_name(self, buffer):
"""
Return the filetype guessed for a buffer
:param buffer: bytes
:return: str of filetype
"""
default = "Unknown"
ftype = None
try:
# Magic is optional
import magic
except ImportError:
return default
try:
# There are several implementations of magic,
# unfortunately all called magic
# We use this one: https://github.com/ahupp/python-magic/
getattr(magic, "MagicException")
except AttributeError:
# Looks like no magic was installed
return default
try:
ftype = magic.from_buffer(buffer[:1024])
except magic.MagicError as e:
log.exception("Error getting the magic type!")
return default
if not ftype:
return default
else:
return self._patch_magic(buffer, ftype)
@property
def files(self):
"""
Returns a dictionary of filenames and detected magic type
:return: dictionary of files and their mime type
"""
return self.get_files_types()
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
:rtype: a dictionnary
"""
if self._files == {}:
# Generate File Types / CRC List
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
# FIXME why not use the crc from the zipfile?
# should be validated as well.
# crc = self.zip.getinfo(i).CRC
self._files[i] = self._get_file_magic_name(buffer)
return self._files
def _patch_magic(self, buffer, orig):
"""
Overwrite some probably wrong detections by mime libraries
:param buffer: bytes of the file to detect
:param orig: guess by mime libary
:return: corrected guess
"""
if ("Zip" in orig) or ('(JAR)' in orig):
val = is_android_raw(buffer)
if val == "APK":
return "Android application package file"
return orig
def get_files_crc32(self):
"""
Calculates and returns a dictionary of filenames and CRC32
:return: dict of filename: CRC32
"""
if self.files_crc32 == {}:
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
return self.files_crc32
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: str, str, int
"""
for k in self.get_files():
yield k, self.get_files_types()[k], self.get_files_crc32()[k]
def get_raw(self):
"""
Return raw bytes of the APK
:rtype: bytes
"""
return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
inside the APK
:rtype: bytes
"""
try:
return self.zip.read(filename)
except KeyError:
raise FileNotPresent(filename)
def get_dex(self):
"""
Return the raw data of the classes dex file
This will give you the data of the file called `classes.dex`
inside the APK. If the APK has multiple DEX files, you need to use :func:`~APK.get_all_dex`.
:rtype: bytes
"""
try:
return self.get_file("classes.dex")
except FileNotPresent:
return ""
def get_dex_names(self):
"""
Return the names of all DEX files found in the APK.
This method only accounts for "offical" dex files, i.e. all files
in the root directory of the APK named classes.dex or classes[0-9]+.dex
:rtype: a list of str
"""
dexre = re.compile("classes(\d*).dex")
return filter(lambda x: dexre.match(x), self.get_files())
def get_all_dex(self):
"""
Return the raw data of all classes dex files
:rtype: a generator of bytes
"""
for dex_name in self.get_dex_names():
yield self.get_file(dex_name)
def is_multidex(self):
"""
Test if the APK has multiple DEX files
:return: True if multiple dex found, otherwise False
"""
dexre = re.compile("^classes(\d+)?.dex$")
return len([instance for instance in self.get_files() if dexre.search(instance)]) > 1
@DeprecationWarning
def get_elements(self, tag_name, attribute, with_namespace=True):
"""
Deprecated: use `get_all_attribute_value()` instead
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
for i in self.xml:
if self.xml[i] is None:
continue
for item in self.xml[i].findall('.//' + tag_name):
if with_namespace:
value = item.get(self._ns(attribute))
else:
value = item.get(attribute)
# There might be an attribute without the namespace
if value:
yield self._format_value(value)
def _format_value(self, value):
"""
Format a value with packagename, if not already set
:param value:
:return:
"""
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
@DeprecationWarning
def get_element(self, tag_name, attribute, **attribute_filter):
"""
:Deprecated: use `get_attribute_value()` instead
Return element in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml:
if self.xml[i] is None:
continue
tag = self.xml[i].findall('.//' + tag_name)
if len(tag) == 0:
return None
for item in tag:
skip_this_item = False
for attr, val in list(attribute_filter.items()):
attr_val = item.get(self._ns(attr))
if attr_val != val:
skip_this_item = True
break
if skip_this_item:
continue
value = item.get(self._ns(attribute))
if value is not None:
return value
return None
def get_all_attribute_value(
self, tag_name, attribute, format_value=True, **attribute_filter
):
"""
Return all the attribute values in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:param format_value: specify if the value needs to be formatted with packagename
:type format_value: boolean
"""
tags = self.find_tags(tag_name, **attribute_filter)
for tag in tags:
value = tag.get(attribute) or tag.get(self._ns(attribute))
if value is not None:
if format_value:
yield self._format_value(value)
else:
yield value
def get_attribute_value(
self, tag_name, attribute, format_value=False, **attribute_filter
):
"""
Return the attribute value in xml files which matches the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:param format_value: specify if the value needs to be formatted with packagename
:type format_value: boolean
"""
for value in self.get_all_attribute_value(
tag_name, attribute, format_value, **attribute_filter):
if value is not None:
return value
def get_value_from_tag(self, tag, attribute):
"""
Return the value of the attribute in a specific tag
:param tag: specify the tag element
:type tag: Element
:param attribute: specify the attribute
:type attribute: string
"""
# TODO: figure out if both android:name and name tag exist which one to give preference
value = tag.get(self._ns(attribute))
if value is None:
log.warning("Failed to get the attribute with namespace")
value = tag.get(attribute)
return value
def find_tags(self, tag_name, **attribute_filter):
"""
Return a list of all the matched tags in all available xml
:param tag: specify the tag name
:type tag: string
"""
all_tags = [
self.find_tags_from_xml(
i, tag_name, **attribute_filter
)
for i in self.xml
]
return [tag for tag_list in all_tags for tag in tag_list]
def find_tags_from_xml(
        self, xml_name, tag_name, **attribute_filter
):
    """
    Return a list of all the matched tags in a specific xml.

    :param xml_name: specify from which xml to pick the tag from
    :type xml_name: string
    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute_filter: extra attribute=value constraints for the match
    """
    xml = self.xml[xml_name]
    if xml is None:
        return []
    if xml.tag == tag_name:
        # BUGFIX: the original passed ``xml.tag`` (the tag *name* string)
        # to is_tag_matched, which expects an element and would fail or
        # mis-match whenever an attribute filter was supplied.  Pass the
        # element itself.
        if self.is_tag_matched(xml, **attribute_filter):
            return [xml]
        return []
    # etree's findall only searches below the root, hence the special
    # root-element case above.
    tags = xml.findall(".//" + tag_name)
    return [
        tag for tag in tags if self.is_tag_matched(
            tag, **attribute_filter
        )
    ]
def get_main_activities(self):
    """
    Return names of the main activities

    These values are read from the AndroidManifest.xml

    :rtype: a set of str
    """
    # x collects activities declaring ACTION_MAIN, y those declaring
    # CATEGORY_LAUNCHER; a "main" activity is in both sets.
    x = set()
    y = set()

    for i in self.xml:
        if self.xml[i] is None:
            continue
        activities_and_aliases = self.xml[i].findall(".//activity") + \
            self.xml[i].findall(".//activity-alias")
        for item in activities_and_aliases:
            # Some applications have more than one MAIN activity.
            # For example: paid and free content
            activityEnabled = item.get(self._ns("enabled"))
            if activityEnabled == "false":
                continue

            for sitem in item.findall(".//action"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.action.MAIN":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        x.add(item.get(self._ns("name")))
                    else:
                        log.warning('Main activity without name')

            for sitem in item.findall(".//category"):
                val = sitem.get(self._ns("name"))
                if val == "android.intent.category.LAUNCHER":
                    activity = item.get(self._ns("name"))
                    if activity is not None:
                        y.add(item.get(self._ns("name")))
                    else:
                        log.warning('Launcher activity without name')

    return x.intersection(y)
def get_main_activity(self):
    """
    Return the formatted name of the main activity, or None when the
    manifest declares no MAIN/LAUNCHER activity.

    :rtype: str
    """
    activities = self.get_main_activities()
    if not activities:
        return None
    return self._format_value(activities.pop())
def get_activities(self):
    """
    Collect the android:name attribute of every declared activity.

    :rtype: a list of str
    """
    names = self.get_all_attribute_value("activity", "name")
    return list(names)
def get_services(self):
    """
    Collect the android:name attribute of every declared service.

    :rtype: a list of str
    """
    names = self.get_all_attribute_value("service", "name")
    return list(names)
def get_receivers(self):
    """
    Collect the android:name attribute of every declared receiver.

    :rtype: a list of string
    """
    names = self.get_all_attribute_value("receiver", "name")
    return list(names)
def get_providers(self):
    """
    Collect the android:name attribute of every declared provider.

    :rtype: a list of string
    """
    names = self.get_all_attribute_value("provider", "name")
    return list(names)
def get_intent_filters(self, itemtype, name):
    """
    Find intent filters for a given item and name.

    Intent filter are attached to activities, services or receivers.
    You can search for the intent filters of such items and get a dictionary of all
    attached actions and intent categories.

    :param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
    :param name: the `android:name` of the parent item, e.g. activity name
    :return: a dictionary with the keys `action` and `category` containing the `android:name` of those items
    """
    d = {"action": [], "category": []}

    for i in self.xml:
        # TODO: this can probably be solved using a single xpath
        for item in self.xml[i].findall(".//" + itemtype):
            if self._format_value(item.get(self._ns("name"))) == name:
                for sitem in item.findall(".//intent-filter"):
                    for ssitem in sitem.findall("action"):
                        if ssitem.get(self._ns("name")) not in d["action"]:
                            d["action"].append(ssitem.get(self._ns("name")))
                    for ssitem in sitem.findall("category"):
                        if ssitem.get(self._ns("name")) not in d["category"]:
                            d["category"].append(ssitem.get(self._ns("name")))

    # Empty keys are removed so callers can simply test the dict for truth.
    if not d["action"]:
        del d["action"]
    if not d["category"]:
        del d["category"]

    return d
def get_permissions(self):
    """
    Return the permission names requested in the AndroidManifest.xml,
    exactly as declared (duplicates preserved, no filtering).

    Implied permissions, which are granted automatically, are not returned
    here. Use :meth:`get_uses_implied_permission_list` if you need a list
    of implied permissions.

    :returns: A list of permissions
    :rtype: list
    """
    return self.permissions
def get_uses_implied_permission_list(self):
    """
    Return all permissions implied by the target SDK or other permissions.

    Each entry is a ``[permission_name, maxSdkVersion]`` pair; the second
    element is usually None.

    :rtype: list of string
    """
    target_sdk = self.get_effective_target_sdk_version()

    READ_CALL_LOG = 'android.permission.READ_CALL_LOG'
    READ_CONTACTS = 'android.permission.READ_CONTACTS'
    READ_EXTERNAL_STORAGE = 'android.permission.READ_EXTERNAL_STORAGE'
    READ_PHONE_STATE = 'android.permission.READ_PHONE_STATE'
    WRITE_CALL_LOG = 'android.permission.WRITE_CALL_LOG'
    WRITE_CONTACTS = 'android.permission.WRITE_CONTACTS'
    WRITE_EXTERNAL_STORAGE = 'android.permission.WRITE_EXTERNAL_STORAGE'

    implied = []

    # Before API 4, external storage and phone state access were implicit.
    has_write_external = WRITE_EXTERNAL_STORAGE in self.permissions
    if target_sdk < 4:
        if not has_write_external:
            implied.append([WRITE_EXTERNAL_STORAGE, None])
            has_write_external = True
        if READ_PHONE_STATE not in self.permissions:
            implied.append([READ_PHONE_STATE, None])

    # Holding (or implying) WRITE_EXTERNAL_STORAGE also implies the READ
    # permission, inheriting WRITE's maxSdkVersion when one was declared.
    if has_write_external and READ_EXTERNAL_STORAGE not in self.permissions:
        max_sdk = next(
            (version for name, version in self.uses_permissions
             if name == WRITE_EXTERNAL_STORAGE), None)
        implied.append([READ_EXTERNAL_STORAGE, max_sdk])

    # Before API 16 the call log lived inside the contacts provider.
    if target_sdk < 16:
        if READ_CONTACTS in self.permissions \
                and READ_CALL_LOG not in self.permissions:
            implied.append([READ_CALL_LOG, None])
        if WRITE_CONTACTS in self.permissions \
                and WRITE_CALL_LOG not in self.permissions:
            implied.append([WRITE_CALL_LOG, None])

    return implied
def get_details_permissions(self):
    """
    Return requested permissions together with their AOSP metadata.

    :rtype: dict of {permission: [protectionLevel, label, description]}
    """
    details = {}
    unknown = "Unknown permission from android reference"
    for perm in self.permissions:
        entry = self.permission_module.get(perm)
        if entry is not None:
            details[perm] = [entry["protectionLevel"], entry["label"],
                             entry["description"]]
        else:
            # FIXME: the permission might be signature, if it is defined by the app itself!
            details[perm] = ["normal", unknown, unknown]
    return details
def get_requested_permissions(self):
    """
    Returns all requested permissions.

    It has the same result as :meth:`get_permissions` and might be removed
    in the future.

    .. deprecated:: use :meth:`get_permissions` instead.

    :rtype: list of str
    """
    # BUGFIX: the previous ``@DeprecationWarning`` decorator replaced this
    # method with a Warning *instance*, making it uncallable.  Emit a
    # proper runtime DeprecationWarning instead.
    import warnings
    warnings.warn(
        "get_requested_permissions() is deprecated, "
        "use get_permissions() instead",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.get_permissions()
def get_requested_aosp_permissions(self):
    """
    Returns requested permissions declared within AOSP project.

    This includes several other permissions as well, which are in the platform apps.

    :rtype: list of str
    """
    known = self.permission_module
    return [perm for perm in self.get_permissions() if perm in known]
def get_requested_aosp_permissions_details(self):
    """
    Returns requested aosp permissions with details; permissions unknown
    to the AOSP reference are silently skipped.

    :rtype: dictionary
    """
    return {
        perm: self.permission_module[perm]
        for perm in self.permissions
        if perm in self.permission_module
    }
def get_requested_third_party_permissions(self):
    """
    Returns list of requested permissions not declared within AOSP project.

    :rtype: list of strings
    """
    known = self.permission_module
    return [perm for perm in self.get_permissions() if perm not in known]
def get_declared_permissions(self):
    """
    Returns list of the permissions declared by the app itself.

    :rtype: list of strings
    """
    return [name for name in self.declared_permissions]
def get_declared_permissions_details(self):
    """
    Returns the app-declared permissions together with their details.

    :rtype: dict
    """
    return self.declared_permissions
def get_max_sdk_version(self):
    """
    Return the ``android:maxSdkVersion`` attribute of the uses-sdk tag.

    :rtype: string
    """
    value = self.get_attribute_value("uses-sdk", "maxSdkVersion")
    return value
def get_min_sdk_version(self):
    """
    Return the ``android:minSdkVersion`` attribute of the uses-sdk tag.

    :rtype: string
    """
    value = self.get_attribute_value("uses-sdk", "minSdkVersion")
    return value
def get_target_sdk_version(self):
    """
    Return the ``android:targetSdkVersion`` attribute of the uses-sdk tag.

    :rtype: string
    """
    value = self.get_attribute_value("uses-sdk", "targetSdkVersion")
    return value
def get_effective_target_sdk_version(self):
    """
    Return the effective targetSdkVersion, always returns int > 0.

    Falls back to minSdkVersion and finally to 1, mirroring the defaults
    described in
    https://developer.android.com/guide/topics/manifest/uses-sdk-element.html

    :rtype: int
    """
    declared = self.get_target_sdk_version() or self.get_min_sdk_version()
    try:
        return int(declared)
    except (ValueError, TypeError):
        return 1
def get_libraries(self):
    """
    Return the android:name attributes of all uses-library tags.

    :rtype: list
    """
    names = self.get_all_attribute_value("uses-library", "name")
    return list(names)
def get_features(self):
    """
    Return every android:name declared via uses-feature in the
    AndroidManifest.xml.

    :return: list
    """
    names = self.get_all_attribute_value("uses-feature", "name")
    return list(names)
def is_wearable(self):
    """
    Check whether the app declares the 'android.hardware.type.watch'
    feature, i.e. whether it was built for wearables.

    See https://developer.android.com/training/wearables/apps/creating.html
    Note that not every wearable app sets this feature (not even the
    example Google provides), so do not rely on it 100%.

    :return: True if wearable, False otherwise
    """
    features = self.get_features()
    return 'android.hardware.type.watch' in features
def is_leanback(self):
    """
    Check whether the app is built for TV (Leanback support) by checking
    if it uses the feature 'android.software.leanback'.

    :return: True if leanback feature is used, false otherwise
    """
    features = self.get_features()
    return 'android.software.leanback' in features
def is_androidtv(self):
    """
    Check whether a touchscreen is declared as not required, which is the
    rule to get into the TV section of the Play Store.

    See https://developer.android.com/training/tv/start/start.html for more information.

    :return: True if 'android.hardware.touchscreen' is not required, False otherwise
    """
    value = self.get_attribute_value(
        'uses-feature', 'name', required="false",
        name="android.hardware.touchscreen"
    )
    return value == "android.hardware.touchscreen"
def new_zip(self, filename, deleted_files=None, new_files={}):
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionnary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionnary (key:filename, value:content of the file)
"""
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
# Block one: deleted_files, or deleted_files and new_files
if deleted_files is not None:
if re.match(deleted_files, item.filename) is None:
# if the regex of deleted_files doesn't match the filename
if new_files is not False:
if item.filename in new_files:
# and if the filename is in new_files
zout.writestr(item, new_files[item.filename])
continue
# Otherwise, write the original file.
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block two: deleted_files is None, new_files is not empty
elif new_files is not False:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
# Block three: deleted_files is None, new_files is empty.
# Just write out the default zip
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
    """
    Return the :class:`AXMLPrinter` object which corresponds to the
    AndroidManifest.xml file, or None when absent.

    :rtype: :class:`~androguard.core.bytecodes.axml.AXMLPrinter`
    """
    return self.axml.get("AndroidManifest.xml")
def get_android_manifest_xml(self):
    """
    Return the parsed xml object which corresponds to the
    AndroidManifest.xml file, or None when absent.

    :rtype: :class:`~lxml.etree.Element`
    """
    return self.xml.get("AndroidManifest.xml")
def get_android_resources(self):
    """
    Return the :class:`~androguard.core.bytecodes.axml.ARSCParser`
    object which corresponds to the resources.arsc file

    The table is parsed lazily on first access and cached in ``self.arsc``.

    :rtype: :class:`~androguard.core.bytecodes.axml.ARSCParser`
    """
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        if "resources.arsc" not in self.zip.namelist():
            # There is a rare case, that no resource file is supplied.
            # Maybe it was added manually, thus we check here
            return None
        # Parse once and memoize for subsequent calls.
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
def show(self):
    """Print a human-readable summary of the APK (file list, permissions,
    main activity, activities, services, receivers, providers) to stdout."""
    self.get_files_types()  # ensure file type information is populated

    print("FILES: ")
    for i in self.get_files():
        try:
            print("\t", i, self._files[i], "%x" % self.files_crc32[i])
        except KeyError:
            # file type could not be determined; print the CRC only
            print("\t", i, "%x" % self.files_crc32[i])

    print("DECLARED PERMISSIONS:")
    declared_permissions = self.get_declared_permissions()
    for i in declared_permissions:
        print("\t", i)

    print("REQUESTED PERMISSIONS:")
    requested_permissions = self.get_permissions()
    for i in requested_permissions:
        print("\t", i)

    print("MAIN ACTIVITY: ", self.get_main_activity())

    print("ACTIVITIES: ")
    activities = self.get_activities()
    for i in activities:
        filters = self.get_intent_filters("activity", i)
        print("\t", i, filters or "")

    print("SERVICES: ")
    services = self.get_services()
    for i in services:
        filters = self.get_intent_filters("service", i)
        print("\t", i, filters or "")

    print("RECEIVERS: ")
    receivers = self.get_receivers()
    for i in receivers:
        filters = self.get_intent_filters("receiver", i)
        print("\t", i, filters or "")

    print("PROVIDERS: ", self.get_providers())
@property
def application(self):
    """The application label, as resolved by :meth:`get_app_name`."""
    return self.get_app_name()
@property
def packagename(self):
    """The package name, as resolved by :meth:`get_package`."""
    return self.get_package()
@property
def version_name(self):
    """The android version name, as resolved by :meth:`get_androidversion_name`."""
    return self.get_androidversion_name()
@property
def version_code(self):
    """The android version code, as resolved by :meth:`get_androidversion_code`."""
    return self.get_androidversion_code()
@property
def icon_info(self):
    """The application icon resource path, as resolved by :meth:`get_app_icon`."""
    return self.get_app_icon()
@property
def icon_data(self):
    """
    Return the raw bytes of the application icon file, or None when the
    entry cannot be read from the archive.
    """
    app_icon_file = self.get_app_icon()
    app_icon_data = None
    try:
        app_icon_data = self.get_file(app_icon_file)
    except FileNotPresent:
        try:
            # Zip entry names may have been stored with cp437-mangled
            # bytes; retry with the re-encoded filename before giving up.
            app_icon_data = self.get_file(app_icon_file.encode().decode('cp437'))
        except FileNotPresent:
            pass
    return app_icon_data
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.name | python | def name(self):
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
class AXMLParser(object):
    """
    AXMLParser reads through all chunks in the AXML file
    and implements a state machine to return information about
    the current chunk, which can then be read by :class:`~AXMLPrinter`.

    An AXML file is a file which contains multiple chunks of data, defined
    by the `ResChunk_header`.
    There is no real file magic but as the size of the first header is fixed
    and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
    will usually start with `0x03000800`.
    But there are several examples where the `type` is set to something
    else, probably in order to fool parsers.

    Typically the AXMLParser is used in a loop which terminates if `m_event`
    is set to `END_DOCUMENT`.
    You can use the `next()` function to get the next chunk.
    Note that not all chunk types are yielded from the iterator! Some chunks
    are processed in the AXMLParser only.
    The parser will set `is_valid()` to False if it parses something not
    valid.  Messages what is wrong are logged.

    See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
    """

    def __init__(self, raw_buff):
        self._reset()

        self._valid = True
        self.axml_tampered = False
        self.buff = bytecode.BuffHandle(raw_buff)

        # Minimum is a single ARSCHeader, which would be a strange edge case...
        if self.buff.size() < 8:
            log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
            self._valid = False
            return

        # This would be even stranger, if an AXML file is larger than 4GB...
        # But this is not possible as the maximum chunk size is a unsigned 4 byte int.
        if self.buff.size() > 0xFFFFFFFF:
            log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
            self._valid = False
            return

        try:
            axml_header = ARSCHeader(self.buff)
        except AssertionError as e:
            log.error("Error parsing first resource header: %s", e)
            self._valid = False
            return

        self.filesize = axml_header.size

        if axml_header.header_size == 28024:
            # Can be a common error: the file is not an AXML but a plain XML
            # The file will then usually start with '<?xm' / '3C 3F 78 6D'
            log.warning("Header size is 28024! Are you trying to parse a plain XML file?")

        if axml_header.header_size != 8:
            log.error(
                "This does not look like an AXML file. "
                "header size does not equal 8! header size = {}".format(
                    axml_header.header_size
                )
            )
            self._valid = False
            return

        if self.filesize > self.buff.size():
            log.error(
                "This does not look like an AXML file. "
                "Declared filesize does not match real size: {} vs {}".format(
                    self.filesize, self.buff.size()
                )
            )
            self._valid = False
            return

        if self.filesize < self.buff.size():
            # The file can still be parsed up to the point where the chunk should end.
            self.axml_tampered = True
            log.warning(
                "Declared filesize ({}) is smaller than total file size ({}). "
                "Was something appended to the file? Trying to parse it anyways.".format(
                    self.filesize, self.buff.size()
                )
            )

        # Not that severe of an error, we have plenty files where this is not
        # set correctly
        if axml_header.type != const.RES_XML_TYPE:
            self.axml_tampered = True
            log.warning(
                "AXML file has an unusual resource type! "
                "Malware likes to to such stuff to anti androguard! "
                "But we try to parse it anyways. "
                "Resource Type: 0x{:04x}".format(axml_header.type)
            )

        # Now we parse the STRING POOL
        try:
            header = ARSCHeader(self.buff)
        except AssertionError as e:
            log.error("Error parsing resource header of string pool: %s", e)
            self._valid = False
            return

        if header.header_size != 0x1C:
            log.error(
                "This does not look like an AXML file. String chunk header "
                "size does not equal 28! header size = {}".format(
                    header.header_size
                )
            )
            self._valid = False
            return

        if header.type != const.RES_STRING_POOL_TYPE:
            log.error(
                "Expected String Pool header, got resource type 0x{:04x} "
                "instead".format(header.type)
            )
            self._valid = False
            return

        self.sb = StringBlock(self.buff, header)

        # Stores resource ID mappings, if any
        self.m_resourceIDs = []

        # Store a list of prefix/uri mappings encountered
        self.namespaces = []

    def is_valid(self):
        """
        Get the state of the AXMLPrinter.

        if an error happened somewhere in the process of parsing the file,
        this flag is set to False.
        """
        return self._valid

    def _reset(self):
        # Reset the per-chunk state before parsing the next chunk.
        self.m_event = -1
        self.m_lineNumber = -1
        self.m_name = -1
        self.m_namespaceUri = -1
        self.m_attributes = []
        self.m_idAttribute = -1
        self.m_classAttribute = -1
        self.m_styleAttribute = -1

    def __next__(self):
        self._do_next()
        return self.m_event

    def _do_next(self):
        if self.m_event == const.END_DOCUMENT:
            return

        self._reset()
        while self._valid:
            # Stop at the declared filesize or at the end of the file
            if self.buff.end() or self.buff.get_idx() == self.filesize:
                self.m_event = const.END_DOCUMENT
                break

            # Again, we read an ARSCHeader
            try:
                h = ARSCHeader(self.buff)
            except AssertionError as e:
                log.error("Error parsing resource header: %s", e)
                self._valid = False
                return

            # Special chunk: Resource Map. This chunk might be contained inside
            # the file, after the string pool.
            if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
                log.debug("AXML contains a RESOURCE MAP")
                # Check size: < 8 bytes mean that the chunk is not complete
                # Should be aligned to 4 bytes.
                if h.size < 8 or (h.size % 4) != 0:
                    log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
                    self._valid = False
                    return

                for i in range((h.size - h.header_size) // 4):
                    self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])

                continue

            # Parse now the XML chunks.
            # unknown chunk types might cause problems, but we can skip them!
            if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
                # h.size is the size of the whole chunk including the header.
                # We read already 8 bytes of the header, thus we need to
                # subtract them.
                log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
                self.buff.set_idx(h.end)
                continue

            # Check that we read a correct header
            if h.header_size != 0x10:
                log.error(
                    "XML Resource Type Chunk header size does not match 16! "
                    "At chunk type 0x{:04x}, declared header size={}, "
                    "chunk size={}".format(h.type, h.header_size, h.size)
                )
                self._valid = False
                return

            # Line Number of the source file, only used as meta information
            self.m_lineNumber, = unpack('<L', self.buff.read(4))

            # Comment_Index (usually 0xFFFFFFFF)
            self.m_comment_index, = unpack('<L', self.buff.read(4))

            if self.m_comment_index != 0xFFFFFFFF and h.type in [
                    const.RES_XML_START_NAMESPACE_TYPE,
                    const.RES_XML_END_NAMESPACE_TYPE]:
                log.warning("Unhandled Comment at namespace chunk: '{}'".format(
                    self.sb[self.m_comment_index])
                )

            if h.type == const.RES_XML_START_NAMESPACE_TYPE:
                prefix, = unpack('<L', self.buff.read(4))
                uri, = unpack('<L', self.buff.read(4))

                s_prefix = self.sb[prefix]
                s_uri = self.sb[uri]

                log.debug(
                    "Start of Namespace mapping: prefix "
                    "{}: '{}' --> uri {}: '{}'".format(
                        prefix, s_prefix, uri, s_uri
                    )
                )

                if s_uri == '':
                    log.warning("Namespace prefix '{}' resolves to empty URI. "
                                "This might be a packer.".format(s_prefix))

                if (prefix, uri) in self.namespaces:
                    log.info(
                        "Namespace mapping ({}, {}) already seen! "
                        "This is usually not a problem but could indicate "
                        "packers or broken AXML compilers.".format(prefix, uri))

                self.namespaces.append((prefix, uri))

                # We can continue with the next chunk, as we store the namespace
                # mappings for each tag
                continue

            if h.type == const.RES_XML_END_NAMESPACE_TYPE:
                # END_PREFIX contains again prefix and uri field
                prefix, = unpack('<L', self.buff.read(4))
                uri, = unpack('<L', self.buff.read(4))

                # We remove the last namespace mapping matching
                if (prefix, uri) in self.namespaces:
                    self.namespaces.remove((prefix, uri))
                else:
                    log.warning(
                        "Reached a NAMESPACE_END without having the namespace stored before? "
                        "Prefix ID: {}, URI ID: {}".format(prefix, uri)
                    )

                # We can continue with the next chunk, as we store the namespace
                # mappings for each tag
                continue

            # START_TAG is the start of a new tag.
            if h.type == const.RES_XML_START_ELEMENT_TYPE:
                # The TAG consists of some fields:
                # * (chunk_size, line_number, comment_index - we read before)
                # * namespace_uri
                # * name
                # * flags
                # * attribute_count
                # * class_attribute
                # After that, there are two lists of attributes, 20 bytes each

                # Namespace URI (String ID)
                self.m_namespaceUri, = unpack('<L', self.buff.read(4))
                # Name of the Tag (String ID)
                self.m_name, = unpack('<L', self.buff.read(4))
                # FIXME: Flags
                _ = self.buff.read(4)  # noqa
                # Attribute Count
                attributeCount, = unpack('<L', self.buff.read(4))
                # Class Attribute
                self.m_classAttribute, = unpack('<L', self.buff.read(4))

                self.m_idAttribute = (attributeCount >> 16) - 1
                self.m_attribute_count = attributeCount & 0xFFFF
                self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
                self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1

                # Now, we parse the attributes.
                # Each attribute has 5 fields of 4 byte
                for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
                    # Each field is linearly parsed into the array
                    # Each Attribute contains:
                    # * Namespace URI (String ID)
                    # * Name (String ID)
                    # * Value
                    # * Type
                    # * Data
                    self.m_attributes.append(unpack('<L', self.buff.read(4))[0])

                # Then there are class_attributes
                for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
                    self.m_attributes[i] = self.m_attributes[i] >> 24

                self.m_event = const.START_TAG
                break

            if h.type == const.RES_XML_END_ELEMENT_TYPE:
                self.m_namespaceUri, = unpack('<L', self.buff.read(4))
                self.m_name, = unpack('<L', self.buff.read(4))

                self.m_event = const.END_TAG
                break

            if h.type == const.RES_XML_CDATA_TYPE:
                # The CDATA field is like an attribute.
                # It contains an index into the String pool
                # as well as a typed value.
                # usually, this typed value is set to UNDEFINED

                # ResStringPool_ref data --> uint32_t index
                self.m_name, = unpack('<L', self.buff.read(4))

                # Res_value typedData:
                # uint16_t size
                # uint8_t res0 -> always zero
                # uint8_t dataType
                # uint32_t data
                # For now, we ignore these values
                size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))

                log.debug(
                    "found a CDATA Chunk: "
                    "index={: 6d}, size={: 4d}, res0={: 4d}, "
                    "dataType={: 4d}, data={: 4d}".format(
                        self.m_name, size, res0, dataType, data
                    )
                )

                self.m_event = const.TEXT
                break

            # Still here? Looks like we read an unknown XML header, try to skip it...
            log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
            self.buff.set_idx(h.end)

    @property
    def name(self):
        """
        Return the String associated with the tag name

        NOTE(review): this property body was elided in the mangled dump,
        leaving two stacked ``@property`` decorators on :attr:`comment`
        (which would break attribute access at runtime); restored from the
        adjacent extracted copy.
        """
        if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
            return u''

        return self.sb[self.m_name]

    @property
    def comment(self):
        """
        Return the comment at the current position or None if no comment is given

        This works only for Tags, as the comments of Namespaces are silently dropped.
        Currently, there is no way of retrieving comments of namespaces.
        """
        if self.m_comment_index == 0xFFFFFFFF:
            return None

        return self.sb[self.m_comment_index]

    @property
    def namespace(self):
        """
        Return the Namespace URI (if any) as a String for the current tag
        """
        if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
            return u''

        # No Namespace
        if self.m_namespaceUri == 0xFFFFFFFF:
            return u''

        return self.sb[self.m_namespaceUri]

    @property
    def nsmap(self):
        """
        Returns the current namespace mapping as a dictionary

        there are several problems with the map and we try to guess a few
        things here:

        1) a URI can be mapped by many prefixes, so it is to decide which one to take
        2) a prefix might map to an empty string (some packers)
        3) uri+prefix mappings might be included several times
        4) prefix might be empty
        """
        NSMAP = dict()
        # solve 3) by using a set
        for k, v in set(self.namespaces):
            s_prefix = self.sb[k]
            s_uri = self.sb[v]
            # Solve 2) & 4) by not including
            if s_uri != "" and s_prefix != "":
                # solve 1) by using the last one in the list
                NSMAP[s_prefix] = s_uri

        return NSMAP

    @property
    def text(self):
        """
        Return the String associated with the current text
        """
        if self.m_name == -1 or self.m_event != const.TEXT:
            return u''

        return self.sb[self.m_name]

    def getName(self):
        """
        Legacy only!
        use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
        """
        return self.name

    def getText(self):
        """
        Legacy only!
        use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
        """
        return self.text

    def getPrefix(self):
        """
        Legacy only!
        use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
        """
        return self.namespace

    def _get_attribute_offset(self, index):
        """
        Return the start inside the m_attributes array for a given attribute
        """
        if self.m_event != const.START_TAG:
            log.warning("Current event is not START_TAG.")

        offset = index * const.ATTRIBUTE_LENGHT
        if offset >= len(self.m_attributes):
            log.warning("Invalid attribute index")

        return offset

    def getAttributeCount(self):
        """
        Return the number of Attributes for a Tag
        or -1 if not in a tag
        """
        if self.m_event != const.START_TAG:
            return -1

        return self.m_attribute_count

    def getAttributeUri(self, index):
        """
        Returns the numeric ID for the namespace URI of an attribute
        """
        offset = self._get_attribute_offset(index)
        uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]

        return uri

    def getAttributeNamespace(self, index):
        """
        Return the Namespace URI (if any) for the attribute
        """
        uri = self.getAttributeUri(index)

        # No Namespace
        if uri == 0xFFFFFFFF:
            return u''

        return self.sb[uri]

    def getAttributeName(self, index):
        """
        Returns the String which represents the attribute name
        """
        offset = self._get_attribute_offset(index)
        name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]

        res = self.sb[name]
        # If the result is a (null) string, we need to look it up.
        if not res:
            attr = self.m_resourceIDs[name]
            if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
                res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
            else:
                # Attach the HEX Number, so for multiple missing attributes we do not run
                # into problems.
                res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
        return res

    def getAttributeValueType(self, index):
        """
        Return the type of the attribute at the given index

        :param index: index of the attribute
        """
        offset = self._get_attribute_offset(index)
        return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]

    def getAttributeValueData(self, index):
        """
        Return the data of the attribute at the given index

        :param index: index of the attribute
        """
        offset = self._get_attribute_offset(index)
        return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]

    def getAttributeValue(self, index):
        """
        This function is only used to look up strings
        All other work is done by
        :func:`~androguard.core.bytecodes.axml.format_value`
        # FIXME should unite those functions

        :param index: index of the attribute
        :return:
        """
        offset = self._get_attribute_offset(index)
        valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
        if valueType == const.TYPE_STRING:
            valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
            return self.sb[valueString]
        return u''
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.namespace | python | def namespace(self):
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
# No Namespace
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri] | Return the Namespace URI (if any) as a String for the current tag | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlparser.py#L408-L419 | null | class AXMLParser(object):
"""
AXMLParser reads through all chunks in the AXML file
and implements a state machine to return information about
the current chunk, which can then be read by :class:`~AXMLPrinter`.
An AXML file is a file which contains multiple chunks of data, defined
by the `ResChunk_header`.
There is no real file magic but as the size of the first header is fixed
and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
will usually start with `0x03000800`.
But there are several examples where the `type` is set to something
else, probably in order to fool parsers.
Typically the AXMLParser is used in a loop which terminates if `m_event` is set to `END_DOCUMENT`.
You can use the `next()` function to get the next chunk.
Note that not all chunk types are yielded from the iterator! Some chunks are processed in
the AXMLParser only.
The parser will set `is_valid()` to False if it parses something not valid.
Messages what is wrong are logged.
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
"""
def __init__(self, raw_buff):
self._reset()
self._valid = True
self.axml_tampered = False
self.buff = bytecode.BuffHandle(raw_buff)
# Minimum is a single ARSCHeader, which would be a strange edge case...
if self.buff.size() < 8:
log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
# This would be even stranger, if an AXML file is larger than 4GB...
# But this is not possible as the maximum chunk size is a unsigned 4 byte int.
if self.buff.size() > 0xFFFFFFFF:
log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
try:
axml_header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing first resource header: %s", e)
self._valid = False
return
self.filesize = axml_header.size
if axml_header.header_size == 28024:
# Can be a common error: the file is not an AXML but a plain XML
# The file will then usually start with '<?xm' / '3C 3F 78 6D'
log.warning("Header size is 28024! Are you trying to parse a plain XML file?")
if axml_header.header_size != 8:
log.error(
"This does not look like an AXML file. "
"header size does not equal 8! header size = {}".format(
axml_header.header_size
)
)
self._valid = False
return
if self.filesize > self.buff.size():
log.error(
"This does not look like an AXML file. "
"Declared filesize does not match real size: {} vs {}".format(
self.filesize, self.buff.size()
)
)
self._valid = False
return
if self.filesize < self.buff.size():
# The file can still be parsed up to the point where the chunk should end.
self.axml_tampered = True
log.warning(
"Declared filesize ({}) is smaller than total file size ({}). "
"Was something appended to the file? Trying to parse it anyways.".format(
self.filesize, self.buff.size()
)
)
# Not that severe of an error, we have plenty files where this is not
# set correctly
if axml_header.type != const.RES_XML_TYPE:
self.axml_tampered = True
log.warning(
"AXML file has an unusual resource type! "
"Malware likes to to such stuff to anti androguard! "
"But we try to parse it anyways. "
"Resource Type: 0x{:04x}".format(axml_header.type)
)
# Now we parse the STRING POOL
try:
header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header of string pool: %s", e)
self._valid = False
return
if header.header_size != 0x1C:
log.error(
"This does not look like an AXML file. String chunk header "
"size does not equal 28! header size = {}".format(
header.header_size
)
)
self._valid = False
return
if header.type != const.RES_STRING_POOL_TYPE:
log.error(
"Expected String Pool header, got resource type 0x{:04x} "
"instead".format(header.type)
)
self._valid = False
return
self.sb = StringBlock(self.buff, header)
# Stores resource ID mappings, if any
self.m_resourceIDs = []
# Store a list of prefix/uri mappings encountered
self.namespaces = []
def is_valid(self):
"""
Get the state of the AXMLPrinter.
if an error happend somewhere in the process of parsing the file,
this flag is set to False.
"""
return self._valid
def _reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def __next__(self):
self._do_next()
return self.m_event
def _do_next(self):
if self.m_event == const.END_DOCUMENT:
return
self._reset()
while self._valid:
# Stop at the declared filesize or at the end of the file
if self.buff.end() or self.buff.get_idx() == self.filesize:
self.m_event = const.END_DOCUMENT
break
# Again, we read an ARSCHeader
try:
h = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header: %s", e)
self._valid = False
return
# Special chunk: Resource Map. This chunk might be contained inside
# the file, after the string pool.
if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
log.debug("AXML contains a RESOURCE MAP")
# Check size: < 8 bytes mean that the chunk is not complete
# Should be aligned to 4 bytes.
if h.size < 8 or (h.size % 4) != 0:
log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
self._valid = False
return
for i in range((h.size - h.header_size) // 4):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# Parse now the XML chunks.
# unknown chunk types might cause problems, but we can skip them!
if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
# h.size is the size of the whole chunk including the header.
# We read already 8 bytes of the header, thus we need to
# subtract them.
log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
self.buff.set_idx(h.end)
continue
# Check that we read a correct header
if h.header_size != 0x10:
log.error(
"XML Resource Type Chunk header size does not match 16! "
"At chunk type 0x{:04x}, declared header size={}, "
"chunk size={}".format(h.type, h.header_size, h.size)
)
self._valid = False
return
# Line Number of the source file, only used as meta information
self.m_lineNumber, = unpack('<L', self.buff.read(4))
# Comment_Index (usually 0xFFFFFFFF)
self.m_comment_index, = unpack('<L', self.buff.read(4))
if self.m_comment_index != 0xFFFFFFFF and h.type in [
const.RES_XML_START_NAMESPACE_TYPE,
const.RES_XML_END_NAMESPACE_TYPE]:
log.warning("Unhandled Comment at namespace chunk: '{}'".format(
self.sb[self.m_comment_index])
)
if h.type == const.RES_XML_START_NAMESPACE_TYPE:
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
s_prefix = self.sb[prefix]
s_uri = self.sb[uri]
log.debug(
"Start of Namespace mapping: prefix "
"{}: '{}' --> uri {}: '{}'".format(
prefix, s_prefix, uri, s_uri
)
)
if s_uri == '':
log.warning("Namespace prefix '{}' resolves to empty URI. "
"This might be a packer.".format(s_prefix))
if (prefix, uri) in self.namespaces:
log.info(
"Namespace mapping ({}, {}) already seen! "
"This is usually not a problem but could indicate "
"packers or broken AXML compilers.".format(prefix, uri))
self.namespaces.append((prefix, uri))
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
if h.type == const.RES_XML_END_NAMESPACE_TYPE:
# END_PREFIX contains again prefix and uri field
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
# We remove the last namespace mapping matching
if (prefix, uri) in self.namespaces:
self.namespaces.remove((prefix, uri))
else:
log.warning(
"Reached a NAMESPACE_END without having the namespace stored before? "
"Prefix ID: {}, URI ID: {}".format(prefix, uri)
)
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
# START_TAG is the start of a new tag.
if h.type == const.RES_XML_START_ELEMENT_TYPE:
# The TAG consists of some fields:
# * (chunk_size, line_number, comment_index - we read before)
# * namespace_uri
# * name
# * flags
# * attribute_count
# * class_attribute
# After that, there are two lists of attributes, 20 bytes each
# Namespace URI (String ID)
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
# Name of the Tag (String ID)
self.m_name, = unpack('<L', self.buff.read(4))
# FIXME: Flags
_ = self.buff.read(4) # noqa
# Attribute Count
attributeCount, = unpack('<L', self.buff.read(4))
# Class Attribute
self.m_classAttribute, = unpack('<L', self.buff.read(4))
self.m_idAttribute = (attributeCount >> 16) - 1
self.m_attribute_count = attributeCount & 0xFFFF
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
# Now, we parse the attributes.
# Each attribute has 5 fields of 4 byte
for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
# Each field is linearly parsed into the array
# Each Attribute contains:
# * Namespace URI (String ID)
# * Name (String ID)
# * Value
# * Type
# * Data
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
# Then there are class_attributes
for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = const.START_TAG
break
if h.type == const.RES_XML_END_ELEMENT_TYPE:
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
self.m_name, = unpack('<L', self.buff.read(4))
self.m_event = const.END_TAG
break
if h.type == const.RES_XML_CDATA_TYPE:
# The CDATA field is like an attribute.
# It contains an index into the String pool
# as well as a typed value.
# usually, this typed value is set to UNDEFINED
# ResStringPool_ref data --> uint32_t index
self.m_name, = unpack('<L', self.buff.read(4))
# Res_value typedData:
# uint16_t size
# uint8_t res0 -> always zero
# uint8_t dataType
# uint32_t data
# For now, we ingore these values
size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))
log.debug(
"found a CDATA Chunk: "
"index={: 6d}, size={: 4d}, res0={: 4d}, "
"dataType={: 4d}, data={: 4d}".format(
self.m_name, size, res0, dataType, data
)
)
self.m_event = const.TEXT
break
# Still here? Looks like we read an unknown XML header, try to skip it...
log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
self.buff.set_idx(h.end)
@property
def name(self):
"""
Return the String assosciated with the tag name
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
return self.sb[self.m_name]
@property
def comment(self):
"""
Return the comment at the current position or None if no comment is given
This works only for Tags, as the comments of Namespaces are silently dropped.
Currently, there is no way of retrieving comments of namespaces.
"""
if self.m_comment_index == 0xFFFFFFFF:
return None
return self.sb[self.m_comment_index]
@property
@property
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP
@property
def text(self):
"""
Return the String assosicated with the current text
"""
if self.m_name == -1 or self.m_event != const.TEXT:
return u''
return self.sb[self.m_name]
def getName(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
"""
return self.name
def getText(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
"""
return self.text
def getPrefix(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
"""
return self.namespace
def _get_attribute_offset(self, index):
"""
Return the start inside the m_attributes array for a given attribute
"""
if self.m_event != const.START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * const.ATTRIBUTE_LENGHT
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
"""
Return the number of Attributes for a Tag
or -1 if not in a tag
"""
if self.m_event != const.START_TAG:
return -1
return self.m_attribute_count
def getAttributeUri(self, index):
"""
Returns the numeric ID for the namespace URI of an attribute
"""
offset = self._get_attribute_offset(index)
uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]
return uri
def getAttributeNamespace(self, index):
"""
Return the Namespace URI (if any) for the attribute
"""
uri = self.getAttributeUri(index)
# No Namespace
if uri == 0xFFFFFFFF:
return u''
return self.sb[uri]
def getAttributeName(self, index):
"""
Returns the String which represents the attribute name
"""
offset = self._get_attribute_offset(index)
name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]
res = self.sb[name]
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res
def getAttributeValueType(self, index):
"""
Return the type of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index):
"""
Return the data of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
if valueType == const.TYPE_STRING:
valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return u''
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.text | python | def text(self):
if self.m_name == -1 or self.m_event != const.TEXT:
return u''
return self.sb[self.m_name] | Return the String assosicated with the current text | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlparser.py#L448-L455 | null | class AXMLParser(object):
"""
AXMLParser reads through all chunks in the AXML file
and implements a state machine to return information about
the current chunk, which can then be read by :class:`~AXMLPrinter`.
An AXML file is a file which contains multiple chunks of data, defined
by the `ResChunk_header`.
There is no real file magic but as the size of the first header is fixed
and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
will usually start with `0x03000800`.
But there are several examples where the `type` is set to something
else, probably in order to fool parsers.
Typically the AXMLParser is used in a loop which terminates if `m_event` is set to `END_DOCUMENT`.
You can use the `next()` function to get the next chunk.
Note that not all chunk types are yielded from the iterator! Some chunks are processed in
the AXMLParser only.
The parser will set `is_valid()` to False if it parses something not valid.
Messages what is wrong are logged.
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
"""
def __init__(self, raw_buff):
self._reset()
self._valid = True
self.axml_tampered = False
self.buff = bytecode.BuffHandle(raw_buff)
# Minimum is a single ARSCHeader, which would be a strange edge case...
if self.buff.size() < 8:
log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
# This would be even stranger, if an AXML file is larger than 4GB...
# But this is not possible as the maximum chunk size is a unsigned 4 byte int.
if self.buff.size() > 0xFFFFFFFF:
log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
try:
axml_header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing first resource header: %s", e)
self._valid = False
return
self.filesize = axml_header.size
if axml_header.header_size == 28024:
# Can be a common error: the file is not an AXML but a plain XML
# The file will then usually start with '<?xm' / '3C 3F 78 6D'
log.warning("Header size is 28024! Are you trying to parse a plain XML file?")
if axml_header.header_size != 8:
log.error(
"This does not look like an AXML file. "
"header size does not equal 8! header size = {}".format(
axml_header.header_size
)
)
self._valid = False
return
if self.filesize > self.buff.size():
log.error(
"This does not look like an AXML file. "
"Declared filesize does not match real size: {} vs {}".format(
self.filesize, self.buff.size()
)
)
self._valid = False
return
if self.filesize < self.buff.size():
# The file can still be parsed up to the point where the chunk should end.
self.axml_tampered = True
log.warning(
"Declared filesize ({}) is smaller than total file size ({}). "
"Was something appended to the file? Trying to parse it anyways.".format(
self.filesize, self.buff.size()
)
)
# Not that severe of an error, we have plenty files where this is not
# set correctly
if axml_header.type != const.RES_XML_TYPE:
self.axml_tampered = True
log.warning(
"AXML file has an unusual resource type! "
"Malware likes to to such stuff to anti androguard! "
"But we try to parse it anyways. "
"Resource Type: 0x{:04x}".format(axml_header.type)
)
# Now we parse the STRING POOL
try:
header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header of string pool: %s", e)
self._valid = False
return
if header.header_size != 0x1C:
log.error(
"This does not look like an AXML file. String chunk header "
"size does not equal 28! header size = {}".format(
header.header_size
)
)
self._valid = False
return
if header.type != const.RES_STRING_POOL_TYPE:
log.error(
"Expected String Pool header, got resource type 0x{:04x} "
"instead".format(header.type)
)
self._valid = False
return
self.sb = StringBlock(self.buff, header)
# Stores resource ID mappings, if any
self.m_resourceIDs = []
# Store a list of prefix/uri mappings encountered
self.namespaces = []
def is_valid(self):
"""
Get the state of the AXMLPrinter.
if an error happend somewhere in the process of parsing the file,
this flag is set to False.
"""
return self._valid
def _reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def __next__(self):
self._do_next()
return self.m_event
def _do_next(self):
if self.m_event == const.END_DOCUMENT:
return
self._reset()
while self._valid:
# Stop at the declared filesize or at the end of the file
if self.buff.end() or self.buff.get_idx() == self.filesize:
self.m_event = const.END_DOCUMENT
break
# Again, we read an ARSCHeader
try:
h = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header: %s", e)
self._valid = False
return
# Special chunk: Resource Map. This chunk might be contained inside
# the file, after the string pool.
if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
log.debug("AXML contains a RESOURCE MAP")
# Check size: < 8 bytes mean that the chunk is not complete
# Should be aligned to 4 bytes.
if h.size < 8 or (h.size % 4) != 0:
log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
self._valid = False
return
for i in range((h.size - h.header_size) // 4):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# Parse now the XML chunks.
# unknown chunk types might cause problems, but we can skip them!
if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
# h.size is the size of the whole chunk including the header.
# We read already 8 bytes of the header, thus we need to
# subtract them.
log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
self.buff.set_idx(h.end)
continue
# Check that we read a correct header
if h.header_size != 0x10:
log.error(
"XML Resource Type Chunk header size does not match 16! "
"At chunk type 0x{:04x}, declared header size={}, "
"chunk size={}".format(h.type, h.header_size, h.size)
)
self._valid = False
return
# Line Number of the source file, only used as meta information
self.m_lineNumber, = unpack('<L', self.buff.read(4))
# Comment_Index (usually 0xFFFFFFFF)
self.m_comment_index, = unpack('<L', self.buff.read(4))
if self.m_comment_index != 0xFFFFFFFF and h.type in [
const.RES_XML_START_NAMESPACE_TYPE,
const.RES_XML_END_NAMESPACE_TYPE]:
log.warning("Unhandled Comment at namespace chunk: '{}'".format(
self.sb[self.m_comment_index])
)
if h.type == const.RES_XML_START_NAMESPACE_TYPE:
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
s_prefix = self.sb[prefix]
s_uri = self.sb[uri]
log.debug(
"Start of Namespace mapping: prefix "
"{}: '{}' --> uri {}: '{}'".format(
prefix, s_prefix, uri, s_uri
)
)
if s_uri == '':
log.warning("Namespace prefix '{}' resolves to empty URI. "
"This might be a packer.".format(s_prefix))
if (prefix, uri) in self.namespaces:
log.info(
"Namespace mapping ({}, {}) already seen! "
"This is usually not a problem but could indicate "
"packers or broken AXML compilers.".format(prefix, uri))
self.namespaces.append((prefix, uri))
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
if h.type == const.RES_XML_END_NAMESPACE_TYPE:
# END_PREFIX contains again prefix and uri field
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
# We remove the last namespace mapping matching
if (prefix, uri) in self.namespaces:
self.namespaces.remove((prefix, uri))
else:
log.warning(
"Reached a NAMESPACE_END without having the namespace stored before? "
"Prefix ID: {}, URI ID: {}".format(prefix, uri)
)
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
# START_TAG is the start of a new tag.
if h.type == const.RES_XML_START_ELEMENT_TYPE:
# The TAG consists of some fields:
# * (chunk_size, line_number, comment_index - we read before)
# * namespace_uri
# * name
# * flags
# * attribute_count
# * class_attribute
# After that, there are two lists of attributes, 20 bytes each
# Namespace URI (String ID)
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
# Name of the Tag (String ID)
self.m_name, = unpack('<L', self.buff.read(4))
# FIXME: Flags
_ = self.buff.read(4) # noqa
# Attribute Count
attributeCount, = unpack('<L', self.buff.read(4))
# Class Attribute
self.m_classAttribute, = unpack('<L', self.buff.read(4))
self.m_idAttribute = (attributeCount >> 16) - 1
self.m_attribute_count = attributeCount & 0xFFFF
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
# Now, we parse the attributes.
# Each attribute has 5 fields of 4 byte
for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
# Each field is linearly parsed into the array
# Each Attribute contains:
# * Namespace URI (String ID)
# * Name (String ID)
# * Value
# * Type
# * Data
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
# Then there are class_attributes
for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = const.START_TAG
break
if h.type == const.RES_XML_END_ELEMENT_TYPE:
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
self.m_name, = unpack('<L', self.buff.read(4))
self.m_event = const.END_TAG
break
if h.type == const.RES_XML_CDATA_TYPE:
# The CDATA field is like an attribute.
# It contains an index into the String pool
# as well as a typed value.
# usually, this typed value is set to UNDEFINED
# ResStringPool_ref data --> uint32_t index
self.m_name, = unpack('<L', self.buff.read(4))
# Res_value typedData:
# uint16_t size
# uint8_t res0 -> always zero
# uint8_t dataType
# uint32_t data
# For now, we ingore these values
size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))
log.debug(
"found a CDATA Chunk: "
"index={: 6d}, size={: 4d}, res0={: 4d}, "
"dataType={: 4d}, data={: 4d}".format(
self.m_name, size, res0, dataType, data
)
)
self.m_event = const.TEXT
break
# Still here? Looks like we read an unknown XML header, try to skip it...
log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
self.buff.set_idx(h.end)
@property
def name(self):
"""
Return the String assosciated with the tag name
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
return self.sb[self.m_name]
@property
def comment(self):
"""
Return the comment at the current position or None if no comment is given
This works only for Tags, as the comments of Namespaces are silently dropped.
Currently, there is no way of retrieving comments of namespaces.
"""
if self.m_comment_index == 0xFFFFFFFF:
return None
return self.sb[self.m_comment_index]
@property
def namespace(self):
"""
Return the Namespace URI (if any) as a String for the current tag
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
# No Namespace
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri]
@property
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP
@property
def getName(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
"""
return self.name
def getText(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
"""
return self.text
def getPrefix(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
"""
return self.namespace
def _get_attribute_offset(self, index):
"""
Return the start inside the m_attributes array for a given attribute
"""
if self.m_event != const.START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * const.ATTRIBUTE_LENGHT
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
"""
Return the number of Attributes for a Tag
or -1 if not in a tag
"""
if self.m_event != const.START_TAG:
return -1
return self.m_attribute_count
def getAttributeUri(self, index):
"""
Returns the numeric ID for the namespace URI of an attribute
"""
offset = self._get_attribute_offset(index)
uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]
return uri
def getAttributeNamespace(self, index):
"""
Return the Namespace URI (if any) for the attribute
"""
uri = self.getAttributeUri(index)
# No Namespace
if uri == 0xFFFFFFFF:
return u''
return self.sb[uri]
def getAttributeName(self, index):
"""
Returns the String which represents the attribute name
"""
offset = self._get_attribute_offset(index)
name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]
res = self.sb[name]
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res
def getAttributeValueType(self, index):
"""
Return the type of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index):
"""
Return the data of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
if valueType == const.TYPE_STRING:
valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return u''
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.getAttributeNamespace | python | def getAttributeNamespace(self, index):
uri = self.getAttributeUri(index)
# No Namespace
if uri == 0xFFFFFFFF:
return u''
return self.sb[uri] | Return the Namespace URI (if any) for the attribute | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlparser.py#L510-L520 | [
"def getAttributeUri(self, index):\n \"\"\"\n Returns the numeric ID for the namespace URI of an attribute\n \"\"\"\n offset = self._get_attribute_offset(index)\n uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]\n\n return uri\n"
] | class AXMLParser(object):
"""
AXMLParser reads through all chunks in the AXML file
and implements a state machine to return information about
the current chunk, which can then be read by :class:`~AXMLPrinter`.
An AXML file is a file which contains multiple chunks of data, defined
by the `ResChunk_header`.
There is no real file magic but as the size of the first header is fixed
and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
will usually start with `0x03000800`.
But there are several examples where the `type` is set to something
else, probably in order to fool parsers.
Typically the AXMLParser is used in a loop which terminates if `m_event` is set to `END_DOCUMENT`.
You can use the `next()` function to get the next chunk.
Note that not all chunk types are yielded from the iterator! Some chunks are processed in
the AXMLParser only.
The parser will set `is_valid()` to False if it parses something not valid.
Messages what is wrong are logged.
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
"""
def __init__(self, raw_buff):
self._reset()
self._valid = True
self.axml_tampered = False
self.buff = bytecode.BuffHandle(raw_buff)
# Minimum is a single ARSCHeader, which would be a strange edge case...
if self.buff.size() < 8:
log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
# This would be even stranger, if an AXML file is larger than 4GB...
# But this is not possible as the maximum chunk size is a unsigned 4 byte int.
if self.buff.size() > 0xFFFFFFFF:
log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
try:
axml_header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing first resource header: %s", e)
self._valid = False
return
self.filesize = axml_header.size
if axml_header.header_size == 28024:
# Can be a common error: the file is not an AXML but a plain XML
# The file will then usually start with '<?xm' / '3C 3F 78 6D'
log.warning("Header size is 28024! Are you trying to parse a plain XML file?")
if axml_header.header_size != 8:
log.error(
"This does not look like an AXML file. "
"header size does not equal 8! header size = {}".format(
axml_header.header_size
)
)
self._valid = False
return
if self.filesize > self.buff.size():
log.error(
"This does not look like an AXML file. "
"Declared filesize does not match real size: {} vs {}".format(
self.filesize, self.buff.size()
)
)
self._valid = False
return
if self.filesize < self.buff.size():
# The file can still be parsed up to the point where the chunk should end.
self.axml_tampered = True
log.warning(
"Declared filesize ({}) is smaller than total file size ({}). "
"Was something appended to the file? Trying to parse it anyways.".format(
self.filesize, self.buff.size()
)
)
# Not that severe of an error, we have plenty files where this is not
# set correctly
if axml_header.type != const.RES_XML_TYPE:
self.axml_tampered = True
log.warning(
"AXML file has an unusual resource type! "
"Malware likes to to such stuff to anti androguard! "
"But we try to parse it anyways. "
"Resource Type: 0x{:04x}".format(axml_header.type)
)
# Now we parse the STRING POOL
try:
header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header of string pool: %s", e)
self._valid = False
return
if header.header_size != 0x1C:
log.error(
"This does not look like an AXML file. String chunk header "
"size does not equal 28! header size = {}".format(
header.header_size
)
)
self._valid = False
return
if header.type != const.RES_STRING_POOL_TYPE:
log.error(
"Expected String Pool header, got resource type 0x{:04x} "
"instead".format(header.type)
)
self._valid = False
return
self.sb = StringBlock(self.buff, header)
# Stores resource ID mappings, if any
self.m_resourceIDs = []
# Store a list of prefix/uri mappings encountered
self.namespaces = []
def is_valid(self):
"""
Get the state of the AXMLPrinter.
if an error happend somewhere in the process of parsing the file,
this flag is set to False.
"""
return self._valid
def _reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def __next__(self):
self._do_next()
return self.m_event
def _do_next(self):
if self.m_event == const.END_DOCUMENT:
return
self._reset()
while self._valid:
# Stop at the declared filesize or at the end of the file
if self.buff.end() or self.buff.get_idx() == self.filesize:
self.m_event = const.END_DOCUMENT
break
# Again, we read an ARSCHeader
try:
h = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header: %s", e)
self._valid = False
return
# Special chunk: Resource Map. This chunk might be contained inside
# the file, after the string pool.
if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
log.debug("AXML contains a RESOURCE MAP")
# Check size: < 8 bytes mean that the chunk is not complete
# Should be aligned to 4 bytes.
if h.size < 8 or (h.size % 4) != 0:
log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
self._valid = False
return
for i in range((h.size - h.header_size) // 4):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# Parse now the XML chunks.
# unknown chunk types might cause problems, but we can skip them!
if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
# h.size is the size of the whole chunk including the header.
# We read already 8 bytes of the header, thus we need to
# subtract them.
log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
self.buff.set_idx(h.end)
continue
# Check that we read a correct header
if h.header_size != 0x10:
log.error(
"XML Resource Type Chunk header size does not match 16! "
"At chunk type 0x{:04x}, declared header size={}, "
"chunk size={}".format(h.type, h.header_size, h.size)
)
self._valid = False
return
# Line Number of the source file, only used as meta information
self.m_lineNumber, = unpack('<L', self.buff.read(4))
# Comment_Index (usually 0xFFFFFFFF)
self.m_comment_index, = unpack('<L', self.buff.read(4))
if self.m_comment_index != 0xFFFFFFFF and h.type in [
const.RES_XML_START_NAMESPACE_TYPE,
const.RES_XML_END_NAMESPACE_TYPE]:
log.warning("Unhandled Comment at namespace chunk: '{}'".format(
self.sb[self.m_comment_index])
)
if h.type == const.RES_XML_START_NAMESPACE_TYPE:
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
s_prefix = self.sb[prefix]
s_uri = self.sb[uri]
log.debug(
"Start of Namespace mapping: prefix "
"{}: '{}' --> uri {}: '{}'".format(
prefix, s_prefix, uri, s_uri
)
)
if s_uri == '':
log.warning("Namespace prefix '{}' resolves to empty URI. "
"This might be a packer.".format(s_prefix))
if (prefix, uri) in self.namespaces:
log.info(
"Namespace mapping ({}, {}) already seen! "
"This is usually not a problem but could indicate "
"packers or broken AXML compilers.".format(prefix, uri))
self.namespaces.append((prefix, uri))
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
if h.type == const.RES_XML_END_NAMESPACE_TYPE:
# END_PREFIX contains again prefix and uri field
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
# We remove the last namespace mapping matching
if (prefix, uri) in self.namespaces:
self.namespaces.remove((prefix, uri))
else:
log.warning(
"Reached a NAMESPACE_END without having the namespace stored before? "
"Prefix ID: {}, URI ID: {}".format(prefix, uri)
)
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
# START_TAG is the start of a new tag.
if h.type == const.RES_XML_START_ELEMENT_TYPE:
# The TAG consists of some fields:
# * (chunk_size, line_number, comment_index - we read before)
# * namespace_uri
# * name
# * flags
# * attribute_count
# * class_attribute
# After that, there are two lists of attributes, 20 bytes each
# Namespace URI (String ID)
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
# Name of the Tag (String ID)
self.m_name, = unpack('<L', self.buff.read(4))
# FIXME: Flags
_ = self.buff.read(4) # noqa
# Attribute Count
attributeCount, = unpack('<L', self.buff.read(4))
# Class Attribute
self.m_classAttribute, = unpack('<L', self.buff.read(4))
self.m_idAttribute = (attributeCount >> 16) - 1
self.m_attribute_count = attributeCount & 0xFFFF
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
# Now, we parse the attributes.
# Each attribute has 5 fields of 4 byte
for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
# Each field is linearly parsed into the array
# Each Attribute contains:
# * Namespace URI (String ID)
# * Name (String ID)
# * Value
# * Type
# * Data
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
# Then there are class_attributes
for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = const.START_TAG
break
if h.type == const.RES_XML_END_ELEMENT_TYPE:
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
self.m_name, = unpack('<L', self.buff.read(4))
self.m_event = const.END_TAG
break
if h.type == const.RES_XML_CDATA_TYPE:
# The CDATA field is like an attribute.
# It contains an index into the String pool
# as well as a typed value.
# usually, this typed value is set to UNDEFINED
# ResStringPool_ref data --> uint32_t index
self.m_name, = unpack('<L', self.buff.read(4))
# Res_value typedData:
# uint16_t size
# uint8_t res0 -> always zero
# uint8_t dataType
# uint32_t data
# For now, we ingore these values
size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))
log.debug(
"found a CDATA Chunk: "
"index={: 6d}, size={: 4d}, res0={: 4d}, "
"dataType={: 4d}, data={: 4d}".format(
self.m_name, size, res0, dataType, data
)
)
self.m_event = const.TEXT
break
# Still here? Looks like we read an unknown XML header, try to skip it...
log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
self.buff.set_idx(h.end)
@property
def name(self):
"""
Return the String assosciated with the tag name
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
return self.sb[self.m_name]
@property
def comment(self):
"""
Return the comment at the current position or None if no comment is given
This works only for Tags, as the comments of Namespaces are silently dropped.
Currently, there is no way of retrieving comments of namespaces.
"""
if self.m_comment_index == 0xFFFFFFFF:
return None
return self.sb[self.m_comment_index]
@property
def namespace(self):
"""
Return the Namespace URI (if any) as a String for the current tag
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
# No Namespace
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri]
@property
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP
@property
def text(self):
"""
Return the String assosicated with the current text
"""
if self.m_name == -1 or self.m_event != const.TEXT:
return u''
return self.sb[self.m_name]
def getName(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
"""
return self.name
def getText(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
"""
return self.text
def getPrefix(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
"""
return self.namespace
def _get_attribute_offset(self, index):
"""
Return the start inside the m_attributes array for a given attribute
"""
if self.m_event != const.START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * const.ATTRIBUTE_LENGHT
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
"""
Return the number of Attributes for a Tag
or -1 if not in a tag
"""
if self.m_event != const.START_TAG:
return -1
return self.m_attribute_count
def getAttributeUri(self, index):
"""
Returns the numeric ID for the namespace URI of an attribute
"""
offset = self._get_attribute_offset(index)
uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]
return uri
def getAttributeName(self, index):
"""
Returns the String which represents the attribute name
"""
offset = self._get_attribute_offset(index)
name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]
res = self.sb[name]
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res
def getAttributeValueType(self, index):
"""
Return the type of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index):
"""
Return the data of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
if valueType == const.TYPE_STRING:
valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return u''
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.getAttributeName | python | def getAttributeName(self, index):
offset = self._get_attribute_offset(index)
name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]
res = self.sb[name]
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res | Returns the String which represents the attribute name | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlparser.py#L522-L540 | [
"def _get_attribute_offset(self, index):\n \"\"\"\n Return the start inside the m_attributes array for a given attribute\n \"\"\"\n if self.m_event != const.START_TAG:\n log.warning(\"Current event is not START_TAG.\")\n\n offset = index * const.ATTRIBUTE_LENGHT\n if offset >= len(self.m_at... | class AXMLParser(object):
"""
AXMLParser reads through all chunks in the AXML file
and implements a state machine to return information about
the current chunk, which can then be read by :class:`~AXMLPrinter`.
An AXML file is a file which contains multiple chunks of data, defined
by the `ResChunk_header`.
There is no real file magic but as the size of the first header is fixed
and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
will usually start with `0x03000800`.
But there are several examples where the `type` is set to something
else, probably in order to fool parsers.
Typically the AXMLParser is used in a loop which terminates if `m_event` is set to `END_DOCUMENT`.
You can use the `next()` function to get the next chunk.
Note that not all chunk types are yielded from the iterator! Some chunks are processed in
the AXMLParser only.
The parser will set `is_valid()` to False if it parses something not valid.
Messages what is wrong are logged.
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
"""
def __init__(self, raw_buff):
self._reset()
self._valid = True
self.axml_tampered = False
self.buff = bytecode.BuffHandle(raw_buff)
# Minimum is a single ARSCHeader, which would be a strange edge case...
if self.buff.size() < 8:
log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
# This would be even stranger, if an AXML file is larger than 4GB...
# But this is not possible as the maximum chunk size is a unsigned 4 byte int.
if self.buff.size() > 0xFFFFFFFF:
log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
try:
axml_header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing first resource header: %s", e)
self._valid = False
return
self.filesize = axml_header.size
if axml_header.header_size == 28024:
# Can be a common error: the file is not an AXML but a plain XML
# The file will then usually start with '<?xm' / '3C 3F 78 6D'
log.warning("Header size is 28024! Are you trying to parse a plain XML file?")
if axml_header.header_size != 8:
log.error(
"This does not look like an AXML file. "
"header size does not equal 8! header size = {}".format(
axml_header.header_size
)
)
self._valid = False
return
if self.filesize > self.buff.size():
log.error(
"This does not look like an AXML file. "
"Declared filesize does not match real size: {} vs {}".format(
self.filesize, self.buff.size()
)
)
self._valid = False
return
if self.filesize < self.buff.size():
# The file can still be parsed up to the point where the chunk should end.
self.axml_tampered = True
log.warning(
"Declared filesize ({}) is smaller than total file size ({}). "
"Was something appended to the file? Trying to parse it anyways.".format(
self.filesize, self.buff.size()
)
)
# Not that severe of an error, we have plenty files where this is not
# set correctly
if axml_header.type != const.RES_XML_TYPE:
self.axml_tampered = True
log.warning(
"AXML file has an unusual resource type! "
"Malware likes to to such stuff to anti androguard! "
"But we try to parse it anyways. "
"Resource Type: 0x{:04x}".format(axml_header.type)
)
# Now we parse the STRING POOL
try:
header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header of string pool: %s", e)
self._valid = False
return
if header.header_size != 0x1C:
log.error(
"This does not look like an AXML file. String chunk header "
"size does not equal 28! header size = {}".format(
header.header_size
)
)
self._valid = False
return
if header.type != const.RES_STRING_POOL_TYPE:
log.error(
"Expected String Pool header, got resource type 0x{:04x} "
"instead".format(header.type)
)
self._valid = False
return
self.sb = StringBlock(self.buff, header)
# Stores resource ID mappings, if any
self.m_resourceIDs = []
# Store a list of prefix/uri mappings encountered
self.namespaces = []
def is_valid(self):
"""
Get the state of the AXMLPrinter.
if an error happend somewhere in the process of parsing the file,
this flag is set to False.
"""
return self._valid
def _reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def __next__(self):
self._do_next()
return self.m_event
def _do_next(self):
if self.m_event == const.END_DOCUMENT:
return
self._reset()
while self._valid:
# Stop at the declared filesize or at the end of the file
if self.buff.end() or self.buff.get_idx() == self.filesize:
self.m_event = const.END_DOCUMENT
break
# Again, we read an ARSCHeader
try:
h = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header: %s", e)
self._valid = False
return
# Special chunk: Resource Map. This chunk might be contained inside
# the file, after the string pool.
if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
log.debug("AXML contains a RESOURCE MAP")
# Check size: < 8 bytes mean that the chunk is not complete
# Should be aligned to 4 bytes.
if h.size < 8 or (h.size % 4) != 0:
log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
self._valid = False
return
for i in range((h.size - h.header_size) // 4):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# Parse now the XML chunks.
# unknown chunk types might cause problems, but we can skip them!
if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
# h.size is the size of the whole chunk including the header.
# We read already 8 bytes of the header, thus we need to
# subtract them.
log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
self.buff.set_idx(h.end)
continue
# Check that we read a correct header
if h.header_size != 0x10:
log.error(
"XML Resource Type Chunk header size does not match 16! "
"At chunk type 0x{:04x}, declared header size={}, "
"chunk size={}".format(h.type, h.header_size, h.size)
)
self._valid = False
return
# Line Number of the source file, only used as meta information
self.m_lineNumber, = unpack('<L', self.buff.read(4))
# Comment_Index (usually 0xFFFFFFFF)
self.m_comment_index, = unpack('<L', self.buff.read(4))
if self.m_comment_index != 0xFFFFFFFF and h.type in [
const.RES_XML_START_NAMESPACE_TYPE,
const.RES_XML_END_NAMESPACE_TYPE]:
log.warning("Unhandled Comment at namespace chunk: '{}'".format(
self.sb[self.m_comment_index])
)
if h.type == const.RES_XML_START_NAMESPACE_TYPE:
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
s_prefix = self.sb[prefix]
s_uri = self.sb[uri]
log.debug(
"Start of Namespace mapping: prefix "
"{}: '{}' --> uri {}: '{}'".format(
prefix, s_prefix, uri, s_uri
)
)
if s_uri == '':
log.warning("Namespace prefix '{}' resolves to empty URI. "
"This might be a packer.".format(s_prefix))
if (prefix, uri) in self.namespaces:
log.info(
"Namespace mapping ({}, {}) already seen! "
"This is usually not a problem but could indicate "
"packers or broken AXML compilers.".format(prefix, uri))
self.namespaces.append((prefix, uri))
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
if h.type == const.RES_XML_END_NAMESPACE_TYPE:
# END_PREFIX contains again prefix and uri field
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
# We remove the last namespace mapping matching
if (prefix, uri) in self.namespaces:
self.namespaces.remove((prefix, uri))
else:
log.warning(
"Reached a NAMESPACE_END without having the namespace stored before? "
"Prefix ID: {}, URI ID: {}".format(prefix, uri)
)
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
# START_TAG is the start of a new tag.
if h.type == const.RES_XML_START_ELEMENT_TYPE:
# The TAG consists of some fields:
# * (chunk_size, line_number, comment_index - we read before)
# * namespace_uri
# * name
# * flags
# * attribute_count
# * class_attribute
# After that, there are two lists of attributes, 20 bytes each
# Namespace URI (String ID)
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
# Name of the Tag (String ID)
self.m_name, = unpack('<L', self.buff.read(4))
# FIXME: Flags
_ = self.buff.read(4) # noqa
# Attribute Count
attributeCount, = unpack('<L', self.buff.read(4))
# Class Attribute
self.m_classAttribute, = unpack('<L', self.buff.read(4))
self.m_idAttribute = (attributeCount >> 16) - 1
self.m_attribute_count = attributeCount & 0xFFFF
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
# Now, we parse the attributes.
# Each attribute has 5 fields of 4 byte
for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
# Each field is linearly parsed into the array
# Each Attribute contains:
# * Namespace URI (String ID)
# * Name (String ID)
# * Value
# * Type
# * Data
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
# Then there are class_attributes
for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = const.START_TAG
break
if h.type == const.RES_XML_END_ELEMENT_TYPE:
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
self.m_name, = unpack('<L', self.buff.read(4))
self.m_event = const.END_TAG
break
if h.type == const.RES_XML_CDATA_TYPE:
# The CDATA field is like an attribute.
# It contains an index into the String pool
# as well as a typed value.
# usually, this typed value is set to UNDEFINED
# ResStringPool_ref data --> uint32_t index
self.m_name, = unpack('<L', self.buff.read(4))
# Res_value typedData:
# uint16_t size
# uint8_t res0 -> always zero
# uint8_t dataType
# uint32_t data
# For now, we ingore these values
size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))
log.debug(
"found a CDATA Chunk: "
"index={: 6d}, size={: 4d}, res0={: 4d}, "
"dataType={: 4d}, data={: 4d}".format(
self.m_name, size, res0, dataType, data
)
)
self.m_event = const.TEXT
break
# Still here? Looks like we read an unknown XML header, try to skip it...
log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
self.buff.set_idx(h.end)
@property
def name(self):
"""
Return the String assosciated with the tag name
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
return self.sb[self.m_name]
@property
def comment(self):
"""
Return the comment at the current position or None if no comment is given
This works only for Tags, as the comments of Namespaces are silently dropped.
Currently, there is no way of retrieving comments of namespaces.
"""
if self.m_comment_index == 0xFFFFFFFF:
return None
return self.sb[self.m_comment_index]
@property
def namespace(self):
"""
Return the Namespace URI (if any) as a String for the current tag
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
# No Namespace
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri]
@property
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP
@property
def text(self):
"""
Return the String assosicated with the current text
"""
if self.m_name == -1 or self.m_event != const.TEXT:
return u''
return self.sb[self.m_name]
def getName(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
"""
return self.name
def getText(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
"""
return self.text
def getPrefix(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
"""
return self.namespace
def _get_attribute_offset(self, index):
"""
Return the start inside the m_attributes array for a given attribute
"""
if self.m_event != const.START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * const.ATTRIBUTE_LENGHT
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
"""
Return the number of Attributes for a Tag
or -1 if not in a tag
"""
if self.m_event != const.START_TAG:
return -1
return self.m_attribute_count
def getAttributeUri(self, index):
"""
Returns the numeric ID for the namespace URI of an attribute
"""
offset = self._get_attribute_offset(index)
uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]
return uri
def getAttributeNamespace(self, index):
"""
Return the Namespace URI (if any) for the attribute
"""
uri = self.getAttributeUri(index)
# No Namespace
if uri == 0xFFFFFFFF:
return u''
return self.sb[uri]
def getAttributeValueType(self, index):
"""
Return the type of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index):
"""
Return the data of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
if valueType == const.TYPE_STRING:
valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return u''
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.getAttributeValueType | python | def getAttributeValueType(self, index):
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE] | Return the type of the attribute at the given index
:param index: index of the attribute | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlparser.py#L542-L549 | [
"def _get_attribute_offset(self, index):\n \"\"\"\n Return the start inside the m_attributes array for a given attribute\n \"\"\"\n if self.m_event != const.START_TAG:\n log.warning(\"Current event is not START_TAG.\")\n\n offset = index * const.ATTRIBUTE_LENGHT\n if offset >= len(self.m_at... | class AXMLParser(object):
"""
AXMLParser reads through all chunks in the AXML file
and implements a state machine to return information about
the current chunk, which can then be read by :class:`~AXMLPrinter`.
An AXML file is a file which contains multiple chunks of data, defined
by the `ResChunk_header`.
There is no real file magic but as the size of the first header is fixed
and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
will usually start with `0x03000800`.
But there are several examples where the `type` is set to something
else, probably in order to fool parsers.
Typically the AXMLParser is used in a loop which terminates if `m_event` is set to `END_DOCUMENT`.
You can use the `next()` function to get the next chunk.
Note that not all chunk types are yielded from the iterator! Some chunks are processed in
the AXMLParser only.
The parser will set `is_valid()` to False if it parses something not valid.
Messages what is wrong are logged.
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
"""
def __init__(self, raw_buff):
self._reset()
self._valid = True
self.axml_tampered = False
self.buff = bytecode.BuffHandle(raw_buff)
# Minimum is a single ARSCHeader, which would be a strange edge case...
if self.buff.size() < 8:
log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
# This would be even stranger, if an AXML file is larger than 4GB...
# But this is not possible as the maximum chunk size is a unsigned 4 byte int.
if self.buff.size() > 0xFFFFFFFF:
log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
try:
axml_header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing first resource header: %s", e)
self._valid = False
return
self.filesize = axml_header.size
if axml_header.header_size == 28024:
# Can be a common error: the file is not an AXML but a plain XML
# The file will then usually start with '<?xm' / '3C 3F 78 6D'
log.warning("Header size is 28024! Are you trying to parse a plain XML file?")
if axml_header.header_size != 8:
log.error(
"This does not look like an AXML file. "
"header size does not equal 8! header size = {}".format(
axml_header.header_size
)
)
self._valid = False
return
if self.filesize > self.buff.size():
log.error(
"This does not look like an AXML file. "
"Declared filesize does not match real size: {} vs {}".format(
self.filesize, self.buff.size()
)
)
self._valid = False
return
if self.filesize < self.buff.size():
# The file can still be parsed up to the point where the chunk should end.
self.axml_tampered = True
log.warning(
"Declared filesize ({}) is smaller than total file size ({}). "
"Was something appended to the file? Trying to parse it anyways.".format(
self.filesize, self.buff.size()
)
)
# Not that severe of an error, we have plenty files where this is not
# set correctly
if axml_header.type != const.RES_XML_TYPE:
self.axml_tampered = True
log.warning(
"AXML file has an unusual resource type! "
"Malware likes to to such stuff to anti androguard! "
"But we try to parse it anyways. "
"Resource Type: 0x{:04x}".format(axml_header.type)
)
# Now we parse the STRING POOL
try:
header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header of string pool: %s", e)
self._valid = False
return
if header.header_size != 0x1C:
log.error(
"This does not look like an AXML file. String chunk header "
"size does not equal 28! header size = {}".format(
header.header_size
)
)
self._valid = False
return
if header.type != const.RES_STRING_POOL_TYPE:
log.error(
"Expected String Pool header, got resource type 0x{:04x} "
"instead".format(header.type)
)
self._valid = False
return
self.sb = StringBlock(self.buff, header)
# Stores resource ID mappings, if any
self.m_resourceIDs = []
# Store a list of prefix/uri mappings encountered
self.namespaces = []
def is_valid(self):
"""
Get the state of the AXMLPrinter.
if an error happend somewhere in the process of parsing the file,
this flag is set to False.
"""
return self._valid
def _reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def __next__(self):
self._do_next()
return self.m_event
def _do_next(self):
if self.m_event == const.END_DOCUMENT:
return
self._reset()
while self._valid:
# Stop at the declared filesize or at the end of the file
if self.buff.end() or self.buff.get_idx() == self.filesize:
self.m_event = const.END_DOCUMENT
break
# Again, we read an ARSCHeader
try:
h = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header: %s", e)
self._valid = False
return
# Special chunk: Resource Map. This chunk might be contained inside
# the file, after the string pool.
if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
log.debug("AXML contains a RESOURCE MAP")
# Check size: < 8 bytes mean that the chunk is not complete
# Should be aligned to 4 bytes.
if h.size < 8 or (h.size % 4) != 0:
log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
self._valid = False
return
for i in range((h.size - h.header_size) // 4):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# Parse now the XML chunks.
# unknown chunk types might cause problems, but we can skip them!
if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
# h.size is the size of the whole chunk including the header.
# We read already 8 bytes of the header, thus we need to
# subtract them.
log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
self.buff.set_idx(h.end)
continue
# Check that we read a correct header
if h.header_size != 0x10:
log.error(
"XML Resource Type Chunk header size does not match 16! "
"At chunk type 0x{:04x}, declared header size={}, "
"chunk size={}".format(h.type, h.header_size, h.size)
)
self._valid = False
return
# Line Number of the source file, only used as meta information
self.m_lineNumber, = unpack('<L', self.buff.read(4))
# Comment_Index (usually 0xFFFFFFFF)
self.m_comment_index, = unpack('<L', self.buff.read(4))
if self.m_comment_index != 0xFFFFFFFF and h.type in [
const.RES_XML_START_NAMESPACE_TYPE,
const.RES_XML_END_NAMESPACE_TYPE]:
log.warning("Unhandled Comment at namespace chunk: '{}'".format(
self.sb[self.m_comment_index])
)
if h.type == const.RES_XML_START_NAMESPACE_TYPE:
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
s_prefix = self.sb[prefix]
s_uri = self.sb[uri]
log.debug(
"Start of Namespace mapping: prefix "
"{}: '{}' --> uri {}: '{}'".format(
prefix, s_prefix, uri, s_uri
)
)
if s_uri == '':
log.warning("Namespace prefix '{}' resolves to empty URI. "
"This might be a packer.".format(s_prefix))
if (prefix, uri) in self.namespaces:
log.info(
"Namespace mapping ({}, {}) already seen! "
"This is usually not a problem but could indicate "
"packers or broken AXML compilers.".format(prefix, uri))
self.namespaces.append((prefix, uri))
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
if h.type == const.RES_XML_END_NAMESPACE_TYPE:
# END_PREFIX contains again prefix and uri field
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
# We remove the last namespace mapping matching
if (prefix, uri) in self.namespaces:
self.namespaces.remove((prefix, uri))
else:
log.warning(
"Reached a NAMESPACE_END without having the namespace stored before? "
"Prefix ID: {}, URI ID: {}".format(prefix, uri)
)
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
# START_TAG is the start of a new tag.
if h.type == const.RES_XML_START_ELEMENT_TYPE:
# The TAG consists of some fields:
# * (chunk_size, line_number, comment_index - we read before)
# * namespace_uri
# * name
# * flags
# * attribute_count
# * class_attribute
# After that, there are two lists of attributes, 20 bytes each
# Namespace URI (String ID)
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
# Name of the Tag (String ID)
self.m_name, = unpack('<L', self.buff.read(4))
# FIXME: Flags
_ = self.buff.read(4) # noqa
# Attribute Count
attributeCount, = unpack('<L', self.buff.read(4))
# Class Attribute
self.m_classAttribute, = unpack('<L', self.buff.read(4))
self.m_idAttribute = (attributeCount >> 16) - 1
self.m_attribute_count = attributeCount & 0xFFFF
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
# Now, we parse the attributes.
# Each attribute has 5 fields of 4 byte
for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
# Each field is linearly parsed into the array
# Each Attribute contains:
# * Namespace URI (String ID)
# * Name (String ID)
# * Value
# * Type
# * Data
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
# Then there are class_attributes
for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = const.START_TAG
break
if h.type == const.RES_XML_END_ELEMENT_TYPE:
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
self.m_name, = unpack('<L', self.buff.read(4))
self.m_event = const.END_TAG
break
if h.type == const.RES_XML_CDATA_TYPE:
# The CDATA field is like an attribute.
# It contains an index into the String pool
# as well as a typed value.
# usually, this typed value is set to UNDEFINED
# ResStringPool_ref data --> uint32_t index
self.m_name, = unpack('<L', self.buff.read(4))
# Res_value typedData:
# uint16_t size
# uint8_t res0 -> always zero
# uint8_t dataType
# uint32_t data
# For now, we ingore these values
size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))
log.debug(
"found a CDATA Chunk: "
"index={: 6d}, size={: 4d}, res0={: 4d}, "
"dataType={: 4d}, data={: 4d}".format(
self.m_name, size, res0, dataType, data
)
)
self.m_event = const.TEXT
break
# Still here? Looks like we read an unknown XML header, try to skip it...
log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
self.buff.set_idx(h.end)
@property
def name(self):
"""
Return the String assosciated with the tag name
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
return self.sb[self.m_name]
@property
def comment(self):
"""
Return the comment at the current position or None if no comment is given
This works only for Tags, as the comments of Namespaces are silently dropped.
Currently, there is no way of retrieving comments of namespaces.
"""
if self.m_comment_index == 0xFFFFFFFF:
return None
return self.sb[self.m_comment_index]
@property
def namespace(self):
"""
Return the Namespace URI (if any) as a String for the current tag
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
# No Namespace
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri]
@property
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP
@property
def text(self):
"""
Return the String assosicated with the current text
"""
if self.m_name == -1 or self.m_event != const.TEXT:
return u''
return self.sb[self.m_name]
def getName(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
"""
return self.name
def getText(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
"""
return self.text
def getPrefix(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
"""
return self.namespace
def _get_attribute_offset(self, index):
"""
Return the start inside the m_attributes array for a given attribute
"""
if self.m_event != const.START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * const.ATTRIBUTE_LENGHT
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
"""
Return the number of Attributes for a Tag
or -1 if not in a tag
"""
if self.m_event != const.START_TAG:
return -1
return self.m_attribute_count
def getAttributeUri(self, index):
"""
Returns the numeric ID for the namespace URI of an attribute
"""
offset = self._get_attribute_offset(index)
uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]
return uri
def getAttributeNamespace(self, index):
"""
Return the Namespace URI (if any) for the attribute
"""
uri = self.getAttributeUri(index)
# No Namespace
if uri == 0xFFFFFFFF:
return u''
return self.sb[uri]
def getAttributeName(self, index):
"""
Returns the String which represents the attribute name
"""
offset = self._get_attribute_offset(index)
name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]
res = self.sb[name]
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res
def getAttributeValueData(self, index):
"""
Return the data of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
if valueType == const.TYPE_STRING:
valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return u''
|
appknox/pyaxmlparser | pyaxmlparser/axmlparser.py | AXMLParser.getAttributeValueData | python | def getAttributeValueData(self, index):
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_DATA] | Return the data of the attribute at the given index
:param index: index of the attribute | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/axmlparser.py#L551-L558 | [
"def _get_attribute_offset(self, index):\n \"\"\"\n Return the start inside the m_attributes array for a given attribute\n \"\"\"\n if self.m_event != const.START_TAG:\n log.warning(\"Current event is not START_TAG.\")\n\n offset = index * const.ATTRIBUTE_LENGHT\n if offset >= len(self.m_at... | class AXMLParser(object):
"""
AXMLParser reads through all chunks in the AXML file
and implements a state machine to return information about
the current chunk, which can then be read by :class:`~AXMLPrinter`.
An AXML file is a file which contains multiple chunks of data, defined
by the `ResChunk_header`.
There is no real file magic but as the size of the first header is fixed
and the `type` of the `ResChunk_header` is set to `RES_XML_TYPE`, a file
will usually start with `0x03000800`.
But there are several examples where the `type` is set to something
else, probably in order to fool parsers.
Typically the AXMLParser is used in a loop which terminates if `m_event` is set to `END_DOCUMENT`.
You can use the `next()` function to get the next chunk.
Note that not all chunk types are yielded from the iterator! Some chunks are processed in
the AXMLParser only.
The parser will set `is_valid()` to False if it parses something not valid.
Messages what is wrong are logged.
See http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#563
"""
def __init__(self, raw_buff):
self._reset()
self._valid = True
self.axml_tampered = False
self.buff = bytecode.BuffHandle(raw_buff)
# Minimum is a single ARSCHeader, which would be a strange edge case...
if self.buff.size() < 8:
log.error("Filesize is too small to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
# This would be even stranger, if an AXML file is larger than 4GB...
# But this is not possible as the maximum chunk size is a unsigned 4 byte int.
if self.buff.size() > 0xFFFFFFFF:
log.error("Filesize is too large to be a valid AXML file! Filesize: {}".format(self.buff.size()))
self._valid = False
return
try:
axml_header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing first resource header: %s", e)
self._valid = False
return
self.filesize = axml_header.size
if axml_header.header_size == 28024:
# Can be a common error: the file is not an AXML but a plain XML
# The file will then usually start with '<?xm' / '3C 3F 78 6D'
log.warning("Header size is 28024! Are you trying to parse a plain XML file?")
if axml_header.header_size != 8:
log.error(
"This does not look like an AXML file. "
"header size does not equal 8! header size = {}".format(
axml_header.header_size
)
)
self._valid = False
return
if self.filesize > self.buff.size():
log.error(
"This does not look like an AXML file. "
"Declared filesize does not match real size: {} vs {}".format(
self.filesize, self.buff.size()
)
)
self._valid = False
return
if self.filesize < self.buff.size():
# The file can still be parsed up to the point where the chunk should end.
self.axml_tampered = True
log.warning(
"Declared filesize ({}) is smaller than total file size ({}). "
"Was something appended to the file? Trying to parse it anyways.".format(
self.filesize, self.buff.size()
)
)
# Not that severe of an error, we have plenty files where this is not
# set correctly
if axml_header.type != const.RES_XML_TYPE:
self.axml_tampered = True
log.warning(
"AXML file has an unusual resource type! "
"Malware likes to to such stuff to anti androguard! "
"But we try to parse it anyways. "
"Resource Type: 0x{:04x}".format(axml_header.type)
)
# Now we parse the STRING POOL
try:
header = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header of string pool: %s", e)
self._valid = False
return
if header.header_size != 0x1C:
log.error(
"This does not look like an AXML file. String chunk header "
"size does not equal 28! header size = {}".format(
header.header_size
)
)
self._valid = False
return
if header.type != const.RES_STRING_POOL_TYPE:
log.error(
"Expected String Pool header, got resource type 0x{:04x} "
"instead".format(header.type)
)
self._valid = False
return
self.sb = StringBlock(self.buff, header)
# Stores resource ID mappings, if any
self.m_resourceIDs = []
# Store a list of prefix/uri mappings encountered
self.namespaces = []
def is_valid(self):
"""
Get the state of the AXMLPrinter.
if an error happend somewhere in the process of parsing the file,
this flag is set to False.
"""
return self._valid
def _reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def __next__(self):
self._do_next()
return self.m_event
def _do_next(self):
if self.m_event == const.END_DOCUMENT:
return
self._reset()
while self._valid:
# Stop at the declared filesize or at the end of the file
if self.buff.end() or self.buff.get_idx() == self.filesize:
self.m_event = const.END_DOCUMENT
break
# Again, we read an ARSCHeader
try:
h = ARSCHeader(self.buff)
except AssertionError as e:
log.error("Error parsing resource header: %s", e)
self._valid = False
return
# Special chunk: Resource Map. This chunk might be contained inside
# the file, after the string pool.
if h.type == const.RES_XML_RESOURCE_MAP_TYPE:
log.debug("AXML contains a RESOURCE MAP")
# Check size: < 8 bytes mean that the chunk is not complete
# Should be aligned to 4 bytes.
if h.size < 8 or (h.size % 4) != 0:
log.error("Invalid chunk size in chunk XML_RESOURCE_MAP")
self._valid = False
return
for i in range((h.size - h.header_size) // 4):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# Parse now the XML chunks.
# unknown chunk types might cause problems, but we can skip them!
if h.type < const.RES_XML_FIRST_CHUNK_TYPE or h.type > const.RES_XML_LAST_CHUNK_TYPE:
# h.size is the size of the whole chunk including the header.
# We read already 8 bytes of the header, thus we need to
# subtract them.
log.error("Not a XML resource chunk type: 0x{:04x}. Skipping {} bytes".format(h.type, h.size))
self.buff.set_idx(h.end)
continue
# Check that we read a correct header
if h.header_size != 0x10:
log.error(
"XML Resource Type Chunk header size does not match 16! "
"At chunk type 0x{:04x}, declared header size={}, "
"chunk size={}".format(h.type, h.header_size, h.size)
)
self._valid = False
return
# Line Number of the source file, only used as meta information
self.m_lineNumber, = unpack('<L', self.buff.read(4))
# Comment_Index (usually 0xFFFFFFFF)
self.m_comment_index, = unpack('<L', self.buff.read(4))
if self.m_comment_index != 0xFFFFFFFF and h.type in [
const.RES_XML_START_NAMESPACE_TYPE,
const.RES_XML_END_NAMESPACE_TYPE]:
log.warning("Unhandled Comment at namespace chunk: '{}'".format(
self.sb[self.m_comment_index])
)
if h.type == const.RES_XML_START_NAMESPACE_TYPE:
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
s_prefix = self.sb[prefix]
s_uri = self.sb[uri]
log.debug(
"Start of Namespace mapping: prefix "
"{}: '{}' --> uri {}: '{}'".format(
prefix, s_prefix, uri, s_uri
)
)
if s_uri == '':
log.warning("Namespace prefix '{}' resolves to empty URI. "
"This might be a packer.".format(s_prefix))
if (prefix, uri) in self.namespaces:
log.info(
"Namespace mapping ({}, {}) already seen! "
"This is usually not a problem but could indicate "
"packers or broken AXML compilers.".format(prefix, uri))
self.namespaces.append((prefix, uri))
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
if h.type == const.RES_XML_END_NAMESPACE_TYPE:
# END_PREFIX contains again prefix and uri field
prefix, = unpack('<L', self.buff.read(4))
uri, = unpack('<L', self.buff.read(4))
# We remove the last namespace mapping matching
if (prefix, uri) in self.namespaces:
self.namespaces.remove((prefix, uri))
else:
log.warning(
"Reached a NAMESPACE_END without having the namespace stored before? "
"Prefix ID: {}, URI ID: {}".format(prefix, uri)
)
# We can continue with the next chunk, as we store the namespace
# mappings for each tag
continue
# START_TAG is the start of a new tag.
if h.type == const.RES_XML_START_ELEMENT_TYPE:
# The TAG consists of some fields:
# * (chunk_size, line_number, comment_index - we read before)
# * namespace_uri
# * name
# * flags
# * attribute_count
# * class_attribute
# After that, there are two lists of attributes, 20 bytes each
# Namespace URI (String ID)
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
# Name of the Tag (String ID)
self.m_name, = unpack('<L', self.buff.read(4))
# FIXME: Flags
_ = self.buff.read(4) # noqa
# Attribute Count
attributeCount, = unpack('<L', self.buff.read(4))
# Class Attribute
self.m_classAttribute, = unpack('<L', self.buff.read(4))
self.m_idAttribute = (attributeCount >> 16) - 1
self.m_attribute_count = attributeCount & 0xFFFF
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
# Now, we parse the attributes.
# Each attribute has 5 fields of 4 byte
for i in range(0, self.m_attribute_count * const.ATTRIBUTE_LENGHT):
# Each field is linearly parsed into the array
# Each Attribute contains:
# * Namespace URI (String ID)
# * Name (String ID)
# * Value
# * Type
# * Data
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
# Then there are class_attributes
for i in range(const.ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), const.ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = const.START_TAG
break
if h.type == const.RES_XML_END_ELEMENT_TYPE:
self.m_namespaceUri, = unpack('<L', self.buff.read(4))
self.m_name, = unpack('<L', self.buff.read(4))
self.m_event = const.END_TAG
break
if h.type == const.RES_XML_CDATA_TYPE:
# The CDATA field is like an attribute.
# It contains an index into the String pool
# as well as a typed value.
# usually, this typed value is set to UNDEFINED
# ResStringPool_ref data --> uint32_t index
self.m_name, = unpack('<L', self.buff.read(4))
# Res_value typedData:
# uint16_t size
# uint8_t res0 -> always zero
# uint8_t dataType
# uint32_t data
# For now, we ingore these values
size, res0, dataType, data = unpack("<HBBL", self.buff.read(8))
log.debug(
"found a CDATA Chunk: "
"index={: 6d}, size={: 4d}, res0={: 4d}, "
"dataType={: 4d}, data={: 4d}".format(
self.m_name, size, res0, dataType, data
)
)
self.m_event = const.TEXT
break
# Still here? Looks like we read an unknown XML header, try to skip it...
log.warning("Unknown XML Chunk: 0x{:04x}, skipping {} bytes.".format(h.type, h.size))
self.buff.set_idx(h.end)
@property
def name(self):
"""
Return the String assosciated with the tag name
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
return self.sb[self.m_name]
@property
def comment(self):
"""
Return the comment at the current position or None if no comment is given
This works only for Tags, as the comments of Namespaces are silently dropped.
Currently, there is no way of retrieving comments of namespaces.
"""
if self.m_comment_index == 0xFFFFFFFF:
return None
return self.sb[self.m_comment_index]
@property
def namespace(self):
"""
Return the Namespace URI (if any) as a String for the current tag
"""
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
# No Namespace
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri]
@property
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP
@property
def text(self):
"""
Return the String assosicated with the current text
"""
if self.m_name == -1 or self.m_event != const.TEXT:
return u''
return self.sb[self.m_name]
def getName(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.name` instead
"""
return self.name
def getText(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.text` instead
"""
return self.text
def getPrefix(self):
"""
Legacy only!
use :py:attr:`~androguard.core.bytecodes.AXMLParser.namespace` instead
"""
return self.namespace
def _get_attribute_offset(self, index):
"""
Return the start inside the m_attributes array for a given attribute
"""
if self.m_event != const.START_TAG:
log.warning("Current event is not START_TAG.")
offset = index * const.ATTRIBUTE_LENGHT
if offset >= len(self.m_attributes):
log.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
"""
Return the number of Attributes for a Tag
or -1 if not in a tag
"""
if self.m_event != const.START_TAG:
return -1
return self.m_attribute_count
def getAttributeUri(self, index):
"""
Returns the numeric ID for the namespace URI of an attribute
"""
offset = self._get_attribute_offset(index)
uri = self.m_attributes[offset + const.ATTRIBUTE_IX_NAMESPACE_URI]
return uri
def getAttributeNamespace(self, index):
"""
Return the Namespace URI (if any) for the attribute
"""
uri = self.getAttributeUri(index)
# No Namespace
if uri == 0xFFFFFFFF:
return u''
return self.sb[uri]
def getAttributeName(self, index):
"""
Returns the String which represents the attribute name
"""
offset = self._get_attribute_offset(index)
name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]
res = self.sb[name]
# If the result is a (null) string, we need to look it up.
if not res:
attr = self.m_resourceIDs[name]
if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
else:
# Attach the HEX Number, so for multiple missing attributes we do not run
# into problems.
res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
return res
def getAttributeValueType(self, index):
"""
Return the type of the attribute at the given index
:param index: index of the attribute
"""
offset = self._get_attribute_offset(index)
return self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValue(self, index):
"""
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions
:param index: index of the attribute
:return:
"""
offset = self._get_attribute_offset(index)
valueType = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_TYPE]
if valueType == const.TYPE_STRING:
valueString = self.m_attributes[offset + const.ATTRIBUTE_IX_VALUE_STRING]
return self.sb[valueString]
return u''
|
appknox/pyaxmlparser | pyaxmlparser/arscutil.py | ARSCResTableConfig.get_qualifier | python | def get_qualifier(self):
res = []
mcc = self.imsi & 0xFFFF
mnc = (self.imsi & 0xFFFF0000) >> 16
if mcc != 0:
res.append("mcc%d" % mcc)
if mnc != 0:
res.append("mnc%d" % mnc)
if self.locale != 0:
res.append(self.get_language_and_region())
screenLayout = self.screenConfig & 0xff
if (screenLayout & const.MASK_LAYOUTDIR) != 0:
if screenLayout & const.MASK_LAYOUTDIR == const.LAYOUTDIR_LTR:
res.append("ldltr")
elif screenLayout & const.MASK_LAYOUTDIR == const.LAYOUTDIR_RTL:
res.append("ldrtl")
else:
res.append("layoutDir_%d" % (screenLayout & const.MASK_LAYOUTDIR))
smallestScreenWidthDp = (self.screenConfig & 0xFFFF0000) >> 16
if smallestScreenWidthDp != 0:
res.append("sw%ddp" % smallestScreenWidthDp)
screenWidthDp = self.screenSizeDp & 0xFFFF
screenHeightDp = (self.screenSizeDp & 0xFFFF0000) >> 16
if screenWidthDp != 0:
res.append("w%ddp" % screenWidthDp)
if screenHeightDp != 0:
res.append("h%ddp" % screenHeightDp)
if (screenLayout & const.MASK_SCREENSIZE) != const.SCREENSIZE_ANY:
if screenLayout & const.MASK_SCREENSIZE == const.SCREENSIZE_SMALL:
res.append("small")
elif screenLayout & const.MASK_SCREENSIZE == const.SCREENSIZE_NORMAL:
res.append("normal")
elif screenLayout & const.MASK_SCREENSIZE == const.SCREENSIZE_LARGE:
res.append("large")
elif screenLayout & const.MASK_SCREENSIZE == const.SCREENSIZE_XLARGE:
res.append("xlarge")
else:
res.append("screenLayoutSize_%d" % (screenLayout & const.MASK_SCREENSIZE))
if (screenLayout & const.MASK_SCREENLONG) != 0:
if screenLayout & const.MASK_SCREENLONG == const.SCREENLONG_NO:
res.append("notlong")
elif screenLayout & const.MASK_SCREENLONG == const.SCREENLONG_YES:
res.append("long")
else:
res.append("screenLayoutLong_%d" % (screenLayout & const.MASK_SCREENLONG))
density = (self.screenType & 0xffff0000) >> 16
if density != const.DENSITY_DEFAULT:
if density == const.DENSITY_LOW:
res.append("ldpi")
elif density == const.DENSITY_MEDIUM:
res.append("mdpi")
elif density == const.DENSITY_TV:
res.append("tvdpi")
elif density == const.DENSITY_HIGH:
res.append("hdpi")
elif density == const.DENSITY_XHIGH:
res.append("xhdpi")
elif density == const.DENSITY_XXHIGH:
res.append("xxhdpi")
elif density == const.DENSITY_XXXHIGH:
res.append("xxxhdpi")
elif density == const.DENSITY_NONE:
res.append("nodpi")
elif density == const.DENSITY_ANY:
res.append("anydpi")
else:
res.append("%ddpi" % (density))
touchscreen = (self.screenType & 0xff00) >> 8
if touchscreen != const.TOUCHSCREEN_ANY:
if touchscreen == const.TOUCHSCREEN_NOTOUCH:
res.append("notouch")
elif touchscreen == const.TOUCHSCREEN_FINGER:
res.append("finger")
elif touchscreen == const.TOUCHSCREEN_STYLUS:
res.append("stylus")
else:
res.append("touchscreen_%d" % touchscreen)
screenSize = self.screenSize
if screenSize != 0:
screenWidth = self.screenSize & 0xffff
screenHeight = (self.screenSize & 0xffff0000) >> 16
res.append("%dx%d" % (screenWidth, screenHeight))
version = self.version
if version != 0:
sdkVersion = self.version & 0xffff
minorVersion = (self.version & 0xffff0000) >> 16
res.append("v%d" % sdkVersion)
if minorVersion != 0:
res.append(".%d" % minorVersion)
return "-".join(res) | Return resource name qualifier for the current configuration.
for example
* `ldpi-v4`
* `hdpi-v4`
All possible qualifiers are listed in table 2 of https://developer.android.com/guide
/topics/resources/providing-resources
FIXME: This name might not have all properties set!
:return: str | train | https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/arscutil.py#L351-L463 | [
"def get_language_and_region(self):\n \"\"\"\n Returns the combined language+region string or \\x00\\x00 for the default locale\n :return:\n \"\"\"\n if self.locale != 0:\n _language = self._unpack_language_or_region(\n [self.locale & 0xff, (self.locale & 0xff00) >> 8, ], ord('a'))\... | class ARSCResTableConfig(object):
"""
ARSCResTableConfig contains the configuration for specific resource selection.
This is used on the device to determine which resources should be loaded
based on different properties of the device like locale or displaysize.
See the definition of ResTable_config in
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/include/androidfw/ResourceTypes.h#911
"""
@classmethod
def default_config(cls):
if not hasattr(cls, 'DEFAULT'):
cls.DEFAULT = ARSCResTableConfig(None)
return cls.DEFAULT
def __init__(self, buff=None, **kwargs):
if buff is not None:
self.start = buff.get_idx()
# uint32_t
self.size = unpack('<I', buff.read(4))[0]
# union: uint16_t mcc, uint16_t mnc
# 0 means any
self.imsi = unpack('<I', buff.read(4))[0]
# uint32_t as chars \0\0 means any
# either two 7bit ASCII representing the ISO-639-1 language code
# or a single 16bit LE value representing ISO-639-2 3 letter code
self.locale = unpack('<I', buff.read(4))[0]
# struct of:
# uint8_t orientation
# uint8_t touchscreen
# uint8_t density
self.screenType = unpack('<I', buff.read(4))[0]
# struct of
# uint8_t keyboard
# uint8_t navigation
# uint8_t inputFlags
# uint8_t inputPad0
self.input = unpack('<I', buff.read(4))[0]
# struct of
# uint16_t screenWidth
# uint16_t screenHeight
self.screenSize = unpack('<I', buff.read(4))[0]
# struct of
# uint16_t sdkVersion
# uint16_t minorVersion which should be always 0, as the meaning is not defined
self.version = unpack('<I', buff.read(4))[0]
# The next three fields seems to be optional
if self.size >= 32:
# struct of
# uint8_t screenLayout
# uint8_t uiMode
# uint16_t smallestScreenWidthDp
self.screenConfig, = unpack('<I', buff.read(4))
else:
log.debug("This file does not have a screenConfig! size={}".format(self.size))
self.screenConfig = 0
if self.size >= 36:
# struct of
# uint16_t screenWidthDp
# uint16_t screenHeightDp
self.screenSizeDp, = unpack('<I', buff.read(4))
else:
log.debug("This file does not have a screenSizeDp! size={}".format(self.size))
self.screenSizeDp = 0
if self.size >= 40:
# struct of
# uint8_t screenLayout2
# uint8_t colorMode
# uint16_t screenConfigPad2
self.screenConfig2, = unpack("<I", buff.read(4))
else:
log.debug("This file does not have a screenConfig2! size={}".format(self.size))
self.screenConfig2 = 0
self.exceedingSize = self.size - (buff.tell() - self.start)
if self.exceedingSize > 0:
log.debug("Skipping padding bytes!")
self.padding = buff.read(self.exceedingSize)
else:
self.start = 0
self.size = 0
self.imsi = \
((kwargs.pop('mcc', 0) & 0xffff) << 0) + \
((kwargs.pop('mnc', 0) & 0xffff) << 16)
self.locale = 0
for char_ix, char in kwargs.pop('locale', "")[0:4]:
self.locale += (ord(char) << (char_ix * 8))
self.screenType = \
((kwargs.pop('orientation', 0) & 0xff) << 0) + \
((kwargs.pop('touchscreen', 0) & 0xff) << 8) + \
((kwargs.pop('density', 0) & 0xffff) << 16)
self.input = \
((kwargs.pop('keyboard', 0) & 0xff) << 0) + \
((kwargs.pop('navigation', 0) & 0xff) << 8) + \
((kwargs.pop('inputFlags', 0) & 0xff) << 16) + \
((kwargs.pop('inputPad0', 0) & 0xff) << 24)
self.screenSize = \
((kwargs.pop('screenWidth', 0) & 0xffff) << 0) + \
((kwargs.pop('screenHeight', 0) & 0xffff) << 16)
self.version = \
((kwargs.pop('sdkVersion', 0) & 0xffff) << 0) + \
((kwargs.pop('minorVersion', 0) & 0xffff) << 16)
self.screenConfig = \
((kwargs.pop('screenLayout', 0) & 0xff) << 0) + \
((kwargs.pop('uiMode', 0) & 0xff) << 8) + \
((kwargs.pop('smallestScreenWidthDp', 0) & 0xffff) << 16)
self.screenSizeDp = \
((kwargs.pop('screenWidthDp', 0) & 0xffff) << 0) + \
((kwargs.pop('screenHeightDp', 0) & 0xffff) << 16)
# TODO add this some day...
self.screenConfig2 = 0
self.exceedingSize = 0
def _unpack_language_or_region(self, char_in, char_base):
char_out = ""
if char_in[0] & 0x80:
first = char_in[1] & 0x1f
second = ((char_in[1] & 0xe0) >> 5) + ((char_in[0] & 0x03) << 3)
third = (char_in[0] & 0x7c) >> 2
char_out += chr(first + char_base)
char_out += chr(second + char_base)
char_out += chr(third + char_base)
else:
if char_in[0]:
char_out += chr(char_in[0])
if char_in[1]:
char_out += chr(char_in[1])
return char_out
def get_language_and_region(self):
"""
Returns the combined language+region string or \x00\x00 for the default locale
:return:
"""
if self.locale != 0:
_language = self._unpack_language_or_region(
[self.locale & 0xff, (self.locale & 0xff00) >> 8, ], ord('a'))
_region = self._unpack_language_or_region(
[
(self.locale & 0xff0000) >> 16,
(self.locale & 0xff000000) >> 24,
], ord('0')
)
return (_language + "-r" + _region) if _region else _language
return "\x00\x00"
def get_config_name_friendly(self):
"""
Here for legacy reasons.
use :meth:`~get_qualifier` instead.
"""
return self.get_qualifier()
def get_language(self):
x = self.locale & 0x0000ffff
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
def get_country(self):
x = (self.locale & 0xffff0000) >> 16
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
def get_density(self):
x = ((self.screenType >> 16) & 0xffff)
return x
def is_default(self):
"""
Test if this is a default resource, which matches all
This is indicated that all fields are zero.
:return: True if default, False otherwise
"""
return all(map(lambda x: x == 0, self._get_tuple()))
def _get_tuple(self):
return (
self.imsi,
self.locale,
self.screenType,
self.input,
self.screenSize,
self.version,
self.screenConfig,
self.screenSizeDp,
self.screenConfig2,
)
def __hash__(self):
return hash(self._get_tuple())
def __eq__(self, other):
return self._get_tuple() == other._get_tuple()
def __repr__(self):
return "<ARSCResTableConfig '{}'='{}'>".format(self.get_qualifier(), repr(self._get_tuple()))
|
ownport/scrapy-dblite | dblite/__init__.py | copy | python | def copy(src, trg, transform=None):
''' copy items with optional fields transformation
'''
source = open(src[0], src[1])
target = open(trg[0], trg[1], autocommit=1000)
for item in source.get():
item = dict(item)
if '_id' in item:
del item['_id']
if transform:
item = transform(item)
target.put(trg[0](item))
source.close()
target.commit()
target.close() | copy items with optional fields transformation | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L34-L51 | [
"def open(item, uri, autocommit=False):\n ''' open sqlite database by uri and Item class\n '''\n return Storage(item, uri, autocommit)\n",
"def transform(item):\n return item\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# simple library for stroring python dictionaries in sqlite database
#
__author__ = 'Andrey Usov <https://github.com/ownport/scrapy-dblite>'
__version__ = '0.2.7'
import os
import re
import inspect
import sqlite3
from .query import SQLBuilder
from urlparse import urlparse
from .settings import SUPPORTED_BACKENDS
from .settings import ITEMS_PER_REQUEST
class DuplicateItem(Exception):
pass
class SQLError(Exception):
pass
def open(item, uri, autocommit=False):
''' open sqlite database by uri and Item class
'''
return Storage(item, uri, autocommit)
def _regexp(expr, item):
''' REGEXP function for Sqlite
'''
reg = re.compile(expr)
return reg.search(item) is not None
class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | _regexp | python | def _regexp(expr, item):
''' REGEXP function for Sqlite
'''
reg = re.compile(expr)
return reg.search(item) is not None | REGEXP function for Sqlite | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L54-L58 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# simple library for stroring python dictionaries in sqlite database
#
__author__ = 'Andrey Usov <https://github.com/ownport/scrapy-dblite>'
__version__ = '0.2.7'
import os
import re
import inspect
import sqlite3
from .query import SQLBuilder
from urlparse import urlparse
from .settings import SUPPORTED_BACKENDS
from .settings import ITEMS_PER_REQUEST
class DuplicateItem(Exception):
pass
class SQLError(Exception):
pass
def open(item, uri, autocommit=False):
''' open sqlite database by uri and Item class
'''
return Storage(item, uri, autocommit)
def copy(src, trg, transform=None):
''' copy items with optional fields transformation
'''
source = open(src[0], src[1])
target = open(trg[0], trg[1], autocommit=1000)
for item in source.get():
item = dict(item)
if '_id' in item:
del item['_id']
if transform:
item = transform(item)
target.put(trg[0](item))
source.close()
target.commit()
target.close()
class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._dict_factory | python | def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d | factory for sqlite3 to return results as dict | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L121-L130 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.parse_uri | python | def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table | parse URI | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L133-L143 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.fieldnames | python | def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys() | return fieldnames | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L146-L158 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._create_table | python | def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL)) | create sqlite's table for storing simple dictionaries | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L160-L176 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._make_item | python | def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item) | make Item class | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L178-L185 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.get | python | def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit) | returns items selected by criteria
If the criteria is not defined, get() returns all items. | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L187-L197 | [
"def _get_all(self):\n ''' return all items\n '''\n rowid = 0\n while True:\n SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table\n self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))\n items = self._cursor.fetchall()\n if len(item... | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._get_all | python | def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item) | return all items | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L199-L211 | [
"def _make_item(self, item):\n ''' make Item class\n '''\n for field in self._item_class.fields:\n if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):\n serializer = self._item_class.fields[field]['dblite_serializer']\n item[field] = serializer.loads... | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._get_with_criteria | python | def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item) | returns items selected by criteria | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L213-L219 | [
"def _make_item(self, item):\n ''' make Item class\n '''\n for field in self._item_class.fields:\n if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):\n serializer = self._item_class.fields[field]['dblite_serializer']\n item[field] = serializer.loads... | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.get_one | python | def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None | return one item | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L221-L228 | [
"def _get_with_criteria(self, criteria, offset=None, limit=None):\n ''' returns items selected by criteria\n '''\n SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)\n self._cursor.execute(SQL)\n for item in self._cursor.fetchall():\n yield self._make_item(item)\n"
] | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._do_autocommit | python | def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0 | perform autocommit | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L230-L244 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.put | python | def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item)) | store item in sqlite database | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L246-L254 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._put_one | python | def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit() | store one item in database | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L256-L288 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage._put_many | python | def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item) | store items in sqlite database | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L290-L296 | null | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.sql | python | def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None | execute sql request and return items | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L298-L316 | [
"def _items(items):\n for item in items:\n yield self._item_class(item)\n"
] | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL)
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/__init__.py | Storage.delete | python | def delete(self, criteria=None, _all=False):
''' delete dictionary(ies) in sqlite database
_all = True - delete all items
'''
if isinstance(criteria, self._item_class):
criteria = {'_id': criteria['_id']}
if criteria is None and not _all:
raise RuntimeError('Criteria is not defined')
SQL = SQLBuilder(self._table, criteria).delete()
self._cursor.execute(SQL) | delete dictionary(ies) in sqlite database
_all = True - delete all items | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/__init__.py#L318-L330 | [
"def delete(self):\n ''' return DELETE SQL\n '''\n SQL = 'DELETE FROM %s' % self._table\n if self._selectors:\n SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()\n\n return SQL\n"
] | class Storage(object):
''' Storage
store simple dictionaries in sqlite database
'''
def __init__(self, item, uri, autocommit=False):
''' __init__
item - Scrapy item class
uri - URI to sqlite database, sqlite://<sqlite-database>:<table>
autocommit - few variations are possible: boolean (False/True) or integer
True - autocommit after each put()
False - no autocommit, commit() only manual
[integer] - autocommit after N[integer] put()
'''
self._item_class = item
self._fields = dict()
#self._fieldnames = None
database, table = self.parse_uri(uri)
# database file
if database:
self._db = database
else:
raise RuntimeError('Empty database name, "%s"' % database)
# database table
if table:
self._table = table.split(' ')[0]
else:
raise RuntimeError('Empty table name, "%s"' % table)
# sqlite connection
try:
self._conn = sqlite3.connect(database)
except sqlite3.OperationalError, err:
raise RuntimeError("%s, database: %s" % (err, database))
self._conn.row_factory = self._dict_factory
self._conn.create_function("REGEXP", 2, _regexp)
# sqlite cursor
self._cursor = self._conn.cursor()
# autocommit data after put()
self._autocommit = autocommit
# commit counter increased every time after put without commit()
self._commit_counter = 0
self._create_table(self._table)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
@staticmethod
def _dict_factory(cursor, row):
''' factory for sqlite3 to return results as dict
'''
d = {}
for idx, col in enumerate(cursor.description):
if col[0] == 'rowid':
d['_id'] = row[idx]
else:
d[col[0]] = row[idx]
return d
@staticmethod
def parse_uri(uri):
''' parse URI
'''
if not uri or uri.find('://') <= 0:
raise RuntimeError('Incorrect URI definition: {}'.format(uri))
backend, rest_uri = uri.split('://')
if backend not in SUPPORTED_BACKENDS:
raise RuntimeError('Unknown backend: {}'.format(backend))
database, table = rest_uri.rsplit(':',1)
return database, table
@property
def fieldnames(self):
''' return fieldnames
'''
if not self._fields:
if self._item_class is not None:
for m in inspect.getmembers(self._item_class):
if m[0] == 'fields' and isinstance(m[1], dict):
self._fields = m[1]
if not self._fields:
raise RuntimeError('Unknown item type, no fields: %s' % self._item_class)
else:
raise RuntimeError('Item class is not defined, %s' % self._item_class)
return self._fields.keys()
def _create_table(self, table_name):
''' create sqlite's table for storing simple dictionaries
'''
if self.fieldnames:
sql_fields = []
for field in self._fields:
if field != '_id':
if 'dblite' in self._fields[field]:
sql_fields.append(' '.join([field, self._fields[field]['dblite']]))
else:
sql_fields.append(field)
sql_fields = ','.join(sql_fields)
SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields)
try:
self._cursor.execute(SQL)
except sqlite3.OperationalError, err:
raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
def _make_item(self, item):
''' make Item class
'''
for field in self._item_class.fields:
if (field in item) and ('dblite_serializer' in self._item_class.fields[field]):
serializer = self._item_class.fields[field]['dblite_serializer']
item[field] = serializer.loads(item[field])
return self._item_class(item)
def get(self, criteria=None, offset=None, limit=None):
''' returns items selected by criteria
If the criteria is not defined, get() returns all items.
'''
if criteria is None and limit is None:
return self._get_all()
elif limit is not None and limit == 1:
return self.get_one(criteria)
else:
return self._get_with_criteria(criteria, offset=offset, limit=limit)
def _get_all(self):
''' return all items
'''
rowid = 0
while True:
SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table
self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST))
items = self._cursor.fetchall()
if len(items) == 0:
break
for item in items:
rowid = item['_id']
yield self._make_item(item)
def _get_with_criteria(self, criteria, offset=None, limit=None):
''' returns items selected by criteria
'''
SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit)
self._cursor.execute(SQL)
for item in self._cursor.fetchall():
yield self._make_item(item)
def get_one(self, criteria):
''' return one item
'''
try:
items = [item for item in self._get_with_criteria(criteria, limit=1)]
return items[0]
except:
return None
def _do_autocommit(self):
''' perform autocommit
'''
# commit()
self._commit_counter += 1
# autocommit as boolean
if isinstance(self._autocommit, bool) and self._autocommit:
self.commit()
self._commit_counter = 0
# autocommit as counter
elif isinstance(self._autocommit, int) and self._autocommit > 0:
if (self._commit_counter % self._autocommit) == 0:
self.commit()
self._commit_counter = 0
def put(self, item):
''' store item in sqlite database
'''
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('Unknown item(s) type, %s' % type(item))
def _put_one(self, item):
''' store one item in database
'''
# prepare values
values = []
for k, v in item.items():
if k == '_id':
continue
if 'dblite_serializer' in item.fields[k]:
serializer = item.fields[k]['dblite_serializer']
v = serializer.dumps(v)
if v is not None:
v = sqlite3.Binary(buffer(v))
values.append(v)
# check if Item is new => update it
if '_id' in item:
fieldnames = ','.join(['%s=?' % f for f in item if f != '_id'])
values.append(item['_id'])
SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames)
# new Item
else:
fieldnames = ','.join([f for f in item if f != '_id'])
fieldnames_template = ','.join(['?' for f in item if f != '_id'])
SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template)
try:
self._cursor.execute(SQL, values)
except sqlite3.OperationalError, err:
raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
self._do_autocommit()
def _put_many(self, items):
''' store items in sqlite database
'''
for item in items:
if not isinstance(item, self._item_class):
raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item)))
self._put_one(item)
def sql(self, sql, params=()):
''' execute sql request and return items
'''
def _items(items):
for item in items:
yield self._item_class(item)
sql = sql.strip()
try:
self._cursor.execute(sql, params)
except sqlite3.OperationalError, err:
raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params) )
except sqlite3.IntegrityError:
raise DuplicateItem('Duplicate item, %s' % item)
if sql.upper().startswith('SELECT'):
return _items(self._cursor.fetchall())
else:
return None
def __len__(self):
''' return size of storage
'''
SQL = 'SELECT count(*) as count FROM %s;' % self._table
self._cursor.execute(SQL)
return int(self._cursor.fetchone()['count'])
def commit(self):
''' commit changes
'''
try:
self._conn.commit()
except sqlite3.ProgrammingError:
pass
def close(self):
''' close database
'''
self._conn.close()
|
ownport/scrapy-dblite | dblite/query.py | SQLBuilder.select | python | def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';')) | return SELECT SQL | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/query.py#L37-L59 | null | class SQLBuilder(object):
''' SQLBuilder
'''
def __init__(self, table, params):
'''__init__
table -
params - is dictionary
'''
self._table = table
self._selectors = ''
self._modifiers = ''
self._selectors, self._modifiers = self._parse(params)
def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';'))
def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL
def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _modifier(self, operator, params):
'''
$orderby: sorts the results of a query in ascending (1) or descending (-1) order.
'''
if operator == '$orderby':
order_types = {-1: 'DESC', 1: 'ASC'}
if not isinstance(params, dict):
raise RuntimeError('Incorrect parameter type, %s' % params)
return 'ORDER BY %s' % ','.join(["%s %s" % (p, order_types[params[p]]) for p in params])
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _value_wrapper(self, value):
''' wrapper for values
'''
if isinstance(value, (int, float,)):
return '=%s' % value
elif isinstance(value, (str, unicode)):
value = value.strip()
# LIKE
if RE_LIKE.match(value):
return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE'))
# REGEXP
elif RE_REGEXP.match(value):
return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP'))
else:
return '=%s' % repr(value)
elif value is None:
return ' ISNULL'
|
ownport/scrapy-dblite | dblite/query.py | SQLBuilder.delete | python | def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL | return DELETE SQL | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/query.py#L61-L68 | null | class SQLBuilder(object):
''' SQLBuilder
'''
def __init__(self, table, params):
'''__init__
table -
params - is dictionary
'''
self._table = table
self._selectors = ''
self._modifiers = ''
self._selectors, self._modifiers = self._parse(params)
def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';'))
def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL
def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _modifier(self, operator, params):
'''
$orderby: sorts the results of a query in ascending (1) or descending (-1) order.
'''
if operator == '$orderby':
order_types = {-1: 'DESC', 1: 'ASC'}
if not isinstance(params, dict):
raise RuntimeError('Incorrect parameter type, %s' % params)
return 'ORDER BY %s' % ','.join(["%s %s" % (p, order_types[params[p]]) for p in params])
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _value_wrapper(self, value):
''' wrapper for values
'''
if isinstance(value, (int, float,)):
return '=%s' % value
elif isinstance(value, (str, unicode)):
value = value.strip()
# LIKE
if RE_LIKE.match(value):
return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE'))
# REGEXP
elif RE_REGEXP.match(value):
return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP'))
else:
return '=%s' % repr(value)
elif value is None:
return ' ISNULL'
|
ownport/scrapy-dblite | dblite/query.py | SQLBuilder._parse | python | def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers | parse parameters and return SQL | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/query.py#L70-L98 | [
"def _logical(self, operator, params):\n ''' \n $and: joins query clauses with a logical AND returns all items \n that match the conditions of both clauses\n $or: joins query clauses with a logical OR returns all items \n that match the conditions of either clause.\n '''\n\n ... | class SQLBuilder(object):
''' SQLBuilder
'''
def __init__(self, table, params):
'''__init__
table -
params - is dictionary
'''
self._table = table
self._selectors = ''
self._modifiers = ''
self._selectors, self._modifiers = self._parse(params)
def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';'))
def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL
def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _modifier(self, operator, params):
'''
$orderby: sorts the results of a query in ascending (1) or descending (-1) order.
'''
if operator == '$orderby':
order_types = {-1: 'DESC', 1: 'ASC'}
if not isinstance(params, dict):
raise RuntimeError('Incorrect parameter type, %s' % params)
return 'ORDER BY %s' % ','.join(["%s %s" % (p, order_types[params[p]]) for p in params])
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _value_wrapper(self, value):
''' wrapper for values
'''
if isinstance(value, (int, float,)):
return '=%s' % value
elif isinstance(value, (str, unicode)):
value = value.strip()
# LIKE
if RE_LIKE.match(value):
return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE'))
# REGEXP
elif RE_REGEXP.match(value):
return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP'))
else:
return '=%s' % repr(value)
elif value is None:
return ' ISNULL'
|
ownport/scrapy-dblite | dblite/query.py | SQLBuilder._logical | python | def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator) | $and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause. | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/query.py#L100-L125 | null | class SQLBuilder(object):
''' SQLBuilder
'''
def __init__(self, table, params):
'''__init__
table -
params - is dictionary
'''
self._table = table
self._selectors = ''
self._modifiers = ''
self._selectors, self._modifiers = self._parse(params)
def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';'))
def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL
def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers
def _modifier(self, operator, params):
'''
$orderby: sorts the results of a query in ascending (1) or descending (-1) order.
'''
if operator == '$orderby':
order_types = {-1: 'DESC', 1: 'ASC'}
if not isinstance(params, dict):
raise RuntimeError('Incorrect parameter type, %s' % params)
return 'ORDER BY %s' % ','.join(["%s %s" % (p, order_types[params[p]]) for p in params])
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _value_wrapper(self, value):
''' wrapper for values
'''
if isinstance(value, (int, float,)):
return '=%s' % value
elif isinstance(value, (str, unicode)):
value = value.strip()
# LIKE
if RE_LIKE.match(value):
return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE'))
# REGEXP
elif RE_REGEXP.match(value):
return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP'))
else:
return '=%s' % repr(value)
elif value is None:
return ' ISNULL'
|
ownport/scrapy-dblite | dblite/query.py | SQLBuilder._modifier | python | def _modifier(self, operator, params):
'''
$orderby: sorts the results of a query in ascending (1) or descending (-1) order.
'''
if operator == '$orderby':
order_types = {-1: 'DESC', 1: 'ASC'}
if not isinstance(params, dict):
raise RuntimeError('Incorrect parameter type, %s' % params)
return 'ORDER BY %s' % ','.join(["%s %s" % (p, order_types[params[p]]) for p in params])
else:
raise RuntimeError('Unknown operator, %s' % operator) | $orderby: sorts the results of a query in ascending (1) or descending (-1) order. | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/query.py#L127-L138 | null | class SQLBuilder(object):
''' SQLBuilder
'''
def __init__(self, table, params):
'''__init__
table -
params - is dictionary
'''
self._table = table
self._selectors = ''
self._modifiers = ''
self._selectors, self._modifiers = self._parse(params)
def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';'))
def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL
def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _value_wrapper(self, value):
''' wrapper for values
'''
if isinstance(value, (int, float,)):
return '=%s' % value
elif isinstance(value, (str, unicode)):
value = value.strip()
# LIKE
if RE_LIKE.match(value):
return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE'))
# REGEXP
elif RE_REGEXP.match(value):
return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP'))
else:
return '=%s' % repr(value)
elif value is None:
return ' ISNULL'
|
ownport/scrapy-dblite | dblite/query.py | SQLBuilder._value_wrapper | python | def _value_wrapper(self, value):
''' wrapper for values
'''
if isinstance(value, (int, float,)):
return '=%s' % value
elif isinstance(value, (str, unicode)):
value = value.strip()
# LIKE
if RE_LIKE.match(value):
return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE'))
# REGEXP
elif RE_REGEXP.match(value):
return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP'))
else:
return '=%s' % repr(value)
elif value is None:
return ' ISNULL' | wrapper for values | train | https://github.com/ownport/scrapy-dblite/blob/6de5021caa31d439478d9808738b046d1db699c9/dblite/query.py#L140-L156 | null | class SQLBuilder(object):
''' SQLBuilder
'''
def __init__(self, table, params):
'''__init__
table -
params - is dictionary
'''
self._table = table
self._selectors = ''
self._modifiers = ''
self._selectors, self._modifiers = self._parse(params)
def select(self, fields=['rowid', '*'], offset=None, limit=None):
''' return SELECT SQL
'''
# base SQL
SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
# selectors
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
# modifiers
if self._modifiers:
SQL = ' '.join([SQL, self._modifiers])
# limit
if limit is not None and isinstance(limit, int):
SQL = ' '.join((SQL, 'LIMIT %s' % limit))
# offset
if (limit is not None) and (offset is not None) and isinstance(offset, int):
SQL = ' '.join((SQL, 'OFFSET %s' % offset))
return ''.join((SQL, ';'))
def delete(self):
''' return DELETE SQL
'''
SQL = 'DELETE FROM %s' % self._table
if self._selectors:
SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
return SQL
def _parse(self, params):
''' parse parameters and return SQL
'''
if not isinstance(params, dict):
return None, None
if len(params) == 0:
return None, None
selectors = list()
modifiers = list()
for k in params.keys():
if k in LOGICAL_OPERATORS:
selectors.append(self._logical(k, params[k]))
elif k in QUERY_MODIFIERS:
modifiers.append(self._modifier(k, params[k]))
else:
if k == '_id':
selectors.append("rowid%s" % (self._value_wrapper(params[k])))
else:
selectors.append("%s%s" % (k, self._value_wrapper(params[k])))
_selectors = ' AND '.join(selectors).strip()
_modifiers = ' '.join(modifiers).strip()
return _selectors, _modifiers
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator)
def _modifier(self, operator, params):
'''
$orderby: sorts the results of a query in ascending (1) or descending (-1) order.
'''
if operator == '$orderby':
order_types = {-1: 'DESC', 1: 'ASC'}
if not isinstance(params, dict):
raise RuntimeError('Incorrect parameter type, %s' % params)
return 'ORDER BY %s' % ','.join(["%s %s" % (p, order_types[params[p]]) for p in params])
else:
raise RuntimeError('Unknown operator, %s' % operator)
|
BlendedSiteGenerator/Blended | blended/__main__.py | install_template | python | def install_template(username, repo):
print("Installing template from " + username + "/" + repo)
dpath = os.path.join(cwd, "templates")
getunzipped(username, repo, dpath) | Installs a Blended template from GitHub | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L65-L70 | [
"def getunzipped(username, repo, thedir):\n \"\"\"Downloads and unzips a zip file\"\"\"\n theurl = \"https://github.com/\" + username + \"/\" + repo + \"/archive/master.zip\"\n name = os.path.join(thedir, 'temp.zip')\n try:\n name = urllib.urlretrieve(theurl, name)\n name = os.path.join(th... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
try:
app_version = pkg_resources.require("blended")[0].version
app_version = app_version[:3]
except:
app_version = "NOTSET"
print("WARNING: app_version not set.\n")
@click.group()
def cli():
"""Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Report the installed version and where commands will run.
    for line in ("You are running Blended v" + app_version,
                 "The current working directory is " + cwd):
        print(line)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
help='The GitHub repository name.')
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    export = parseXML(filepath)
    channel = export.rss.channel
    # Site-wide metadata comes from the WXR channel element.
    site_name = channel.title.cdata
    site_desc = channel.description.cdata
    site_lang = channel.language.cdata
    site_url = channel.link.cdata
    author = channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_name,
                 wdesc=site_desc, wlan=site_lang, wurl=site_url, aname=author)
    # One content file per exported item; spaces become underscores.
    for item in channel.item:
        target = os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html")
        with open(target, 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    export = parseXML(filepath)
    site_name = export.feed.title.cdata
    author = export.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_name, aname=author)
    # Only feed entries whose id marks them as posts become content files.
    for entry in export.feed.entry:
        if "post" in entry.id.cdata:
            target = os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html")
            with open(target, 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # GitHub permanently disabled the unauthenticated git:// protocol,
    # so install over https instead.
    url = "git+https://github.com/" + username + "/" + repo + ".git"
    try:
        # pip.main() was removed from pip's public API in pip 10.
        pip.main(['install', '-U', url])
    except AttributeError:
        # Recommended invocation for modern pip: run it as a module.
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', url])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # raw_input() is the Python 2 spelling of input(); pick once.
    ask = input if sys.version_info > (3, 0) else raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Upload the built files to FTP"""
    # Skip Blended's own sources; only generated output is shipped.
    skip = ("config.py", "config.pyc", "templates", "content")
    for name in os.listdir(path):
        if name in skip:
            continue
        localpath = os.path.join(path, name)
        if os.path.isfile(localpath):
            print("STOR", name, localpath)
            ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
        elif os.path.isdir(localpath):
            print("MKD", name)
            try:
                ftp.mkd(name)
            except error_perm as e:
                # ignore "directory already exists" (FTP code 550)
                if not e.args[0].startswith('550'):
                    raise
            # Descend, mirror the subtree, then climb back up.
            print("CWD", name)
            ftp.cwd(name)
            placeFiles(ftp, localpath)
            print("CWD", "..")
            ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # FTP credentials live in the generated config.py next to the site.
    if not os.path.exists(os.path.join(cwd, "config.py")):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
    except:
        sys.exit(
            "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    connection = FTP()
    connection.connect(ftp_server, ftp_port)
    connection.login(ftp_username, ftp_password)
    local_root = os.path.join(cwd, outdir)
    try:
        connection.cwd(ftp_upload_path)
        placeFiles(connection, local_root)
    except:
        # Best-effort close before bailing out with a message.
        connection.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
    connection.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the output directory; a missing directory is a no-op.
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    if not os.path.exists(os.path.join(cwd, "config.py")):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import website_name
    except:
        sys.exit(
            "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    build_dir = os.path.join(cwd, outdir)
    if not os.path.exists(build_dir):
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
        return
    # Archive name: <Site_Name>-build-<YYYY-MM-DD>.zip in the project root.
    archive_base = os.path.join(cwd, website_name.replace(" ", "_") +
                                "-build-" + str(datetime.now().date()))
    shutil.make_archive(archive_base, 'zip', build_dir)
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended"""
    print("Purging the Blended files!")
    # Directories created by init/build.
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated configuration files, including bytecode and the backup.
    for name in ("config.py", "config.pyc", "config.py.oldbak"):
        file_path = os.path.join(cwd, name)
        if os.path.exists(file_path):
            os.remove(file_path)
def convert_text(filename):
    """Convert a content file to an HTML fragment.

    Dispatches on the file extension (Markdown, docx, Textile, Jade,
    reStructuredText, HTML or plain text) and returns the converted
    markup followed by two newlines.  Unknown extensions print a
    warning and yield "NULL\n\n".
    """
    # Fixes: endswith() instead of substring checks, so a name like
    # "notes.md.txt" is dispatched on its real extension, and every
    # file is opened in a `with` block so the handle is closed.
    if filename.endswith(".md"):
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
    elif filename.endswith(".docx"):
        # NOTE(review): join() with an absolute path returns that path
        # unchanged, which callers passing full paths rely on — TODO confirm.
        with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
            result = mammoth.convert_to_html(docx_file)
        text_cont1 = "\n" + result.value + "\n"
    elif filename.endswith(".tile"):
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
    elif filename.endswith(".jade"):
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
    elif filename.endswith(".rst"):
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
    elif filename.endswith(".html") or filename.endswith(".txt"):
        with open(filename, "r") as text_content:
            text_cont1 = text_content.read()
    else:
        print(filename + " is not a valid file type!")
        text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
"""Build the files!"""
# Make sure there is actually a configuration file
config_file_dir = os.path.join(cwd, "config.py")
if not os.path.exists(config_file_dir):
sys.exit(
"There dosen't seem to be a configuration file. Have you run the init command?")
else:
sys.path.insert(0, cwd)
try:
from config import website_name, website_description, website_language, home_page_list
except:
sys.exit(
"ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
try:
from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
except:
website_description_long = ""
website_license = ""
website_url = ""
author_name = ""
author_bio = ""
plugins = []
custom_variables = {}
minify_css = False
minify_js = False
print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
# Create the build folder
build_dir = os.path.join(cwd, outdir)
if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
replace_folder(build_dir)
# Make sure there is actually a header template file
header_file_dir = os.path.join(cwd, "templates", "header.html")
if not os.path.exists(header_file_dir):
sys.exit(
"There dosen't seem to be a header template file. You need one to generate.")
# Make sure there is actually a footer template file
footer_file_dir = os.path.join(cwd, "templates", "footer.html")
if not os.path.exists(footer_file_dir):
sys.exit(
"There dosen't seem to be a footer template file. You need one to generate.")
# Open the header and footer files for reading
header_file = open(header_file_dir, "r")
footer_file = open(footer_file_dir, "r")
# Create the HTML page listing
page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
if not os.path.exists(page_list_item_file):
page_list = '<ul class="page-list">\n'
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
for filename in files:
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_link = ""
else:
subfolder_link = subfolder + "/"
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
newFilename = get_html_filename(filename)
newFilename2 = get_html_clear_filename(filename)
page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
'">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
str(file_modified) + '</span></li>\n'
page_list = page_list + '</ul>'
else:
with open(page_list_item_file, 'r') as f:
page_list_item = f.read()
page_list = ""
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
dirs[:] = [d for d in dirs if "_" not in d]
for filename in files:
p_content = convert_text(os.path.join(root, filename))
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_link = ""
else:
subfolder_link = subfolder + "/"
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
file_modified_day = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
file_modified_year = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
file_modified_month = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
month_name = calendar.month_name[int(file_modified_month)]
newFilename = get_html_filename(filename)
newFilename2 = get_html_clear_filename(filename)
page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
"{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
if home_page_list == "yes" or home_page_list:
# Open the home page file (index.html) for writing
home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
home_working_file.write(header_file.read())
# Make sure there is actually a home page template file
home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
if os.path.exists(home_templ_dir):
home_templ_file = open(home_templ_dir, "r")
home_working_file.write(home_templ_file.read())
else:
print("\nNo home page template file found. Writing page list to index.html")
home_working_file.write(page_list)
home_working_file.write(footer_file.read())
home_working_file.close()
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
dirs[:] = [d for d in dirs if "_" not in d]
for filename in files:
if not filename.startswith("_"):
header_file = open(header_file_dir, "r")
footer_file = open(footer_file_dir, "r")
newFilename = get_html_filename(filename)
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
currents_working_file = open(
os.path.join(cwd, outdir, newFilename), "w")
else:
create_folder(os.path.join(cwd, outdir, subfolder))
currents_working_file = open(os.path.join(
cwd, outdir, subfolder, newFilename), "w")
# Write the header
currents_working_file.write(header_file.read())
text_cont1 = convert_text(os.path.join(root, filename))
if "+++++" in text_cont1.splitlines()[1]:
page_template_file = text_cont1.splitlines()[0]
text_cont1 = text_cont1.replace(
text_cont1.splitlines()[0], "")
text_cont1 = text_cont1.replace(
text_cont1.splitlines()[1], "")
else:
page_template_file = "content_page"
# Write the text content into the content template and onto the
# build file
content_templ_dir = os.path.join(
cwd, "templates", page_template_file + ".html")
if os.path.exists(content_templ_dir):
content_templ_file = open(content_templ_dir, "r")
content_templ_file1 = content_templ_file.read()
content_templ_file2 = content_templ_file1.replace(
"{page_content}", text_cont1)
currents_working_file.write(content_templ_file2)
else:
currents_working_file.write(text_cont1)
# Write the footer to the build file
currents_working_file.write("\n" + footer_file.read())
# Close the build file
currents_working_file.close()
# Find all the nav(something) templates in the `templates` folder and
# Read their content to the dict
navs = {}
for file in os.listdir(os.path.join(cwd, "templates")):
if "nav" in file:
nav_cont = open(os.path.join(cwd, "templates", file), "r")
navs[file.replace(".html", "")] = nav_cont.read()
nav_cont.close()
forbidden_dirs = set(["assets", "templates"])
blended_version_message = "Built with Blended v" + \
str(app_version)
build_date = str(datetime.now().date())
build_time = str(datetime.now().time())
build_datetime = str(datetime.now())
# Replace global variables such as site name and language
for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
dirs[:] = [d for d in dirs if d not in forbidden_dirs]
for filename in files:
if filename != "config.pyc" and filename != "config.py":
newFilename = get_html_clear_filename(filename)
page_file = filename.replace(".html", "")
page_folder = os.path.basename(os.path.dirname(os.path.join(
root, filename))).replace("-", "").replace("_", "").title()
page_folder_orig = os.path.basename(
os.path.dirname(os.path.join(root, filename)))
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, outdir), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_folder = os.path.join(cwd, outdir, filename)
else:
subfolder_folder = os.path.join(
cwd, outdir, subfolder, filename)
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
file_modified_day = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
file_modified_year = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
file_modified_month = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
month_name = calendar.month_name[int(file_modified_month)]
# The Loop!
for line in fileinput.input(subfolder_folder, inplace=1):
for var in custom_variables:
line = line.replace(
"{" + var + "}", custom_variables[var])
if len(plugins) != 0:
for i in range(len(plugins)):
if sys.version_info[0] < 2:
main = importlib.import_module(plugins[i])
elif sys.version_info[0] < 3:
main = __import__(plugins[i])
content = main.main()
line = line.replace(
"{" + plugins[i] + "}", content)
if "{nav" in line:
navname = line.split("{")[1].split("}")[0]
line = line.replace(
"{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
line = line.replace(
"{website_description}", website_description)
line = line.replace(
"{website_description_long}", website_description_long)
line = line.replace("{website_license}", website_license)
line = line.replace("{website_language}", website_language)
line = line.replace("{website_url}", website_url)
line = line.replace("{author_name}", author_name)
line = line.replace("{author_bio}", author_bio)
line = line.replace("{random_number}",
str(randint(0, 100000000)))
line = line.replace("{build_date}", build_date)
line = line.replace("{build_time}", build_time)
line = line.replace("{build_datetime}", build_datetime)
line = line.replace("{page_list}", page_list)
line = line.replace("{page_name}", newFilename)
line = line.replace("{page_filename}", page_file)
line = line.replace("{page_file}", filename)
line = line.replace("{" + filename + "_active}", "active")
if page_folder != outdir.title():
line = line.replace("{page_folder}", page_folder)
else:
line = line.replace("{page_folder}", "")
if page_folder_orig != outdir:
line = line.replace(
"{page_folder_orig}", page_folder_orig)
else:
line = line.replace("{page_folder_orig}", "")
line = line.replace("{page_date}", str(file_modified))
line = line.replace("{page_day}", str(file_modified_day))
line = line.replace("{page_year}", str(file_modified_year))
line = line.replace(
"{page_month}", str(file_modified_month))
line = line.replace(
"{page_month_name}", str(month_name))
line = line.replace("{blended_version}", str(app_version))
line = line.replace(
"{blended_version_message}", blended_version_message)
line = line.replace("{website_name}", website_name)
top = os.path.join(cwd, outdir)
startinglevel = top.count(os.sep)
relative_path = ""
level = root.count(os.sep) - startinglevel
for i in range(level):
relative_path = relative_path + "../"
line = line.replace("{relative_root}", relative_path)
print(line.rstrip('\n'))
fileinput.close()
# Copy the asset folder to the build folder
if os.path.exists(os.path.join(cwd, "templates", "assets")):
if os.path.exists(os.path.join(cwd, outdir, "assets")):
shutil.rmtree(os.path.join(cwd, outdir, "assets"))
shutil.copytree(os.path.join(cwd, "templates", "assets"),
os.path.join(cwd, outdir, "assets"))
for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
for file in files:
if not file.startswith("_"):
if (file.endswith(".sass")) or (file.endswith(".scss")):
sass_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if sass_text != "":
text_file.write(sass.compile(string=sass_text))
else:
print(file + " is empty! Not compiling Sass.")
text_file.close()
if file.endswith(".less"):
less_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if less_text != "":
text_file.write(lesscpy.compile(StringIO(less_text)))
else:
print(file + " is empty! Not compiling Less.")
text_file.close()
if file.endswith(".styl"):
try:
styl_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if styl_text != "":
text_file.write(Stylus().compile(styl_text))
else:
print(file + " is empty! Not compiling Styl.")
text_file.close()
except:
print("Not able to build with Stylus! Is it installed?")
try:
subprocess.call["npm", "install", "-g", "stylus"]
except:
print("NPM (NodeJS) not working. Is it installed?")
if file.endswith(".coffee"):
coffee_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file[:-6] + "js"), "w")
if coffee_text != "":
text_file.write(coffeescript.compile(coffee_text))
else:
print(file + " is empty! Not compiling CoffeeScript.")
text_file.close()
if minify_css:
if file.endswith(".css"):
css_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file), "w")
if css_text != "":
text_file.write(cssmin(css_text))
text_file.close()
if minify_js:
if file.endswith(".js"):
js_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file), "w")
if js_text != "":
text_file.write(jsmin(js_text))
text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # reload() and sys.setdefaultencoding() only exist on Python 2;
    # Python 3 already defaults to UTF-8, so skip them instead of
    # crashing with a NameError.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
outdir_type = "build"
class Watcher:
    """Watch the specified dirs for changes"""

    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes"""
        event_handler = Handler()
        threads = []
        # Watch both source trees with the same observer and handler.
        for watch_dir in (os.path.join(cwd, "content"), os.path.join(cwd, "templates")):
            self.observer.schedule(event_handler, str(watch_dir), recursive=True)
            threads.append(self.observer)
        self.observer.start()
        try:
            # Idle until interrupted; the handler does the real work.
            while True:
                time.sleep(5)
        except:
            # Bare except preserved: this is how CTRL+C stops the loop.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""

    @staticmethod
    def on_any_event(event):
        # Rebuild the whole site on any file create/modify/delete and
        # report which path triggered the rebuild.
        global outdir_type
        if event.is_directory:
            return None
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    global outdir_type
    outdir_type = outdir
    # Python-2-only encoding workaround; reload()/setdefaultencoding
    # do not exist on Python 3 and are unnecessary there.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
if __name__ == '__main__':
cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | import_wp | python | def import_wp(filepath):
print("\nBlended: Static Website Generator -\n")
checkConfig()
print("Importing from WordPress...")
wp = parseXML(filepath)
wname = wp.rss.channel.title.cdata
wdesc = wp.rss.channel.description.cdata
wlan = wp.rss.channel.language.cdata
wurl = wp.rss.channel.link.cdata
aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname,
wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
for item in wp.rss.channel.item:
with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
wfile.write(item.content_encoded.cdata.strip())
print("\nYour website has been imported from WordPress.") | Imports A WordPress export and converts it to a Blended site | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L75-L101 | [
"def checkConfig():\n \"\"\"If the config.py file exists, back it up\"\"\"\n config_file_dir = os.path.join(cwd, \"config.py\")\n if os.path.exists(config_file_dir):\n print(\"Making a backup of your config file!\")\n config_file_dir2 = os.path.join(cwd, \"config.py.oldbak\")\n copyfil... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
try:
app_version = pkg_resources.require("blended")[0].version
app_version = app_version[:3]
except:
app_version = "NOTSET"
print("WARNING: app_version not set.\n")
@click.group()
def cli():
"""Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
"""Prints info about Blended"""
print("You are running Blended v" + app_version)
print("The current working directory is " + cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
help='The GitHub repository name.')
def install_template(username, repo):
"""Installs a Blended template from GitHub"""
print("Installing template from " + username + "/" + repo)
dpath = os.path.join(cwd, "templates")
getunzipped(username, repo, dpath)
# NOTE(review): the next two decorators declare an `import-wp` command but no
# function body follows them here, so they currently stack onto
# import_blogger as well — the import-wp implementation appears to be
# missing. Confirm against upstream before relying on either command.
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    # Back up any existing config.py before it gets overwritten.
    checkConfig()
    print("Importing from Blogger...")
    # parseXML returns an object tree mirroring the Blogger Atom export.
    blogger = parseXML(filepath)
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    # Only feed entries whose id marks them as posts become content files.
    for entry in blogger.feed.entry:
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # NOTE(review): pip.main() is a private API removed in pip >= 10, and
    # GitHub no longer serves the git:// protocol — consider invoking pip
    # via subprocess with an https:// URL instead.
    pip.main(['install', '-U', "git+git://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    # Back up any existing config.py before regenerating it.
    checkConfig()
    # Prompt with input() on Python 3 and raw_input() on Python 2.
    if (sys.version_info > (3, 0)):
        wname = input("Website Name: ")
        wdesc = input("Website Description: ")
        wlan = input("Website Language: ")
        wlic = input("Website License: ")
        aname = input("Author(s) Name(s): ")
    else:
        wname = raw_input("Website Name: ")
        wdesc = raw_input("Website Description: ")
        wlan = raw_input("Website Language: ")
        wlic = raw_input("Website License: ")
        aname = raw_input("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload everything under *path* to the connected FTP server.

    The config files and the source folders (templates/, content/) are
    never uploaded; remote directories are created on demand.

    :param ftp: an already-connected ftplib.FTP instance
    :param path: local directory whose contents are uploaded
    """
    skipped = ("config.py", "config.pyc", "templates", "content")
    for entry in os.listdir(path):
        if entry in skipped:
            continue
        local_item = os.path.join(path, entry)
        if os.path.isfile(local_item):
            print("STOR", entry, local_item)
            ftp.storbinary('STOR ' + entry, open(local_item, 'rb'))
        elif os.path.isdir(local_item):
            print("MKD", entry)
            try:
                ftp.mkd(entry)
            except error_perm as e:
                # A 550 reply means the directory already exists — fine;
                # anything else is a real failure.
                if not e.args[0].startswith('550'):
                    raise
            print("CWD", entry)
            ftp.cwd(entry)
            # Recurse into the subdirectory, then climb back up so the
            # remote cursor stays in sync with the local walk.
            placeFiles(ftp, local_item)
            print("CWD", "..")
            ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
    # Pull the FTP credentials from config.py in the working directory.
    try:
        from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
    except:
        sys.exit(
            "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    server = ftp_server
    username = ftp_username
    password = ftp_password
    port = ftp_port
    ftp = FTP()
    ftp.connect(server, port)
    ftp.login(username, password)
    filenameCV = os.path.join(cwd, outdir)
    try:
        ftp.cwd(ftp_upload_path)
        placeFiles(ftp, filenameCV)
    except:
        # NOTE(review): this broad except also swallows permission and
        # transfer errors raised by placeFiles, so the message below can
        # be misleading about the real cause.
        ftp.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
    ftp.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    # Delete the whole output directory tree; a missing folder is not an
    # error, it simply means there is nothing to clean.
    print("Removing the built files!")
    target_dir = os.path.join(cwd, outdir)
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    # The site name from config.py is used in the archive's filename.
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
    try:
        from config import website_name
    except:  # NOTE(review): bare except hides more than a missing setting
        sys.exit(
            "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Archive the build folder as <site>-build-<date>.zip next to it.
    build_dir = os.path.join(cwd, outdir)
    zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                           str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(zip_dir, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended"""
    # The six copy-pasted remove stanzas collapsed into two data-driven
    # loops; one of the original comments even mislabelled the backup file.
    print("Purging the Blended files!")
    # Generated folders: templates, content sources and the build output.
    for folder in ("templates", "content", "build"):
        target = os.path.join(cwd, folder)
        if os.path.exists(target):
            shutil.rmtree(target)
    # Generated configuration files, including the bytecode cache and the
    # backup copy written by checkConfig().
    for cfg in ("config.py", "config.pyc", "config.py.oldbak"):
        target = os.path.join(cwd, cfg)
        if os.path.exists(target):
            os.remove(target)
def convert_text(filename):
    """Convert the post/page content using the converters.

    The converter is picked by file extension: Markdown (.md), Word
    (.docx), Textile (.tile), Jade (.jade), reStructuredText (.rst);
    .html and .txt files pass through unchanged.

    :param filename: path of the content file to convert
    :return: the converted HTML followed by two trailing newlines, or the
             literal string "NULL" plus two newlines for unsupported types
    """
    # Fixes over the previous version: extensions are matched with
    # endswith() instead of substring `in` (".md" could match unrelated
    # names); the .docx branch opens the given path directly instead of
    # re-joining it under content/ (callers already pass a full path);
    # and the file handles are closed via `with`.
    if filename.endswith(".docx"):
        # mammoth needs a binary stream.
        with open(filename, "rb") as docx_file:
            result = mammoth.convert_to_html(docx_file)
        return "\n" + result.value + "\n" + "\n\n"
    with open(filename, "r") as text_content:
        if filename.endswith(".md"):
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif filename.endswith(".tile"):
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif filename.endswith(".jade"):
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif filename.endswith(".rst"):
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif filename.endswith(".html") or filename.endswith(".txt"):
            # Already HTML / plain text: pass through untouched.
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the whole site into *outdir*.

    Reads config.py from the working directory, renders every file under
    content/ between the header/footer templates, substitutes the
    {placeholder} variables in the rendered output, then copies the
    template assets and compiles Sass/Less/Stylus/CoffeeScript (with
    optional CSS/JS minification).

    :param outdir: name of the output folder under the working directory
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
    # Required configuration values — abort if any is missing.
    try:
        from config import website_name, website_description, website_language, home_page_list
    except:
        sys.exit(
            "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Optional configuration values — fall back to harmless defaults.
    try:
        from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
    except:
        website_description_long = ""
        website_license = ""
        website_url = ""
        author_name = ""
        author_bio = ""
        plugins = []
        custom_variables = {}
        minify_css = False
        minify_js = False
        print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder
    build_dir = os.path.join(cwd, outdir)
    # NOTE(review): `"." not in outdir` already implies every later clause
    # (any string containing ".." also contains "."), so the extra checks
    # are redundant. The intent seems to be refusing to wipe relative
    # paths like "." — confirm before simplifying.
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading.
    # (These handles are consumed once by the home-page write below and
    # re-opened per content file later; they are never explicitly closed.)
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        # No custom list-item template: emit a plain <ul> of links.
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                # Work out the content file's subfolder relative to content/.
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        # Custom list-item template: fill its {placeholders} per file.
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders with "_" in the name are treated as private drafts.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                # Derive day/month/year strings from the file's mtime.
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Render every content file between the header and footer templates.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                # Re-open per file: the handles were consumed above.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line selects a per-page
                # template named on the first line of the content.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop!
                # fileinput with inplace=1 redirects stdout into the file,
                # so every print() below writes the substituted line back.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # NOTE(review): both guards use `<`, so on
                            # Python 3 neither branch runs and `main` is
                            # unbound (NameError). Probably meant to
                            # distinguish Py2 from Py3 — confirm intent.
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        # Swap a {nav*} placeholder for the matching
                        # nav template read above.
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes "../" repeated once per
                    # directory level below the build root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile preprocessor sources in-place in the copied assets.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # NOTE(review): bug — this *indexes* the
                                # function instead of calling it; should be
                                # subprocess.call(["npm", "install", "-g",
                                # "stylus"]). As written it raises
                                # TypeError, which the except below then
                                # misreports as NPM being unavailable.
                                subprocess.call["npm", "install", "-g", "stylus"]
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    if sys.version_info[0] < 3:
        # Python 2 only: force the UTF-8 default encoding. reload() and
        # sys.setdefaultencoding() do not exist on Python 3 (where text is
        # already Unicode), so the unguarded calls raised NameError there.
        reload(sys)  # noqa: F821
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Default output directory used by the file-watcher rebuild hooks; the
# `interactive` command overwrites this module-level value.
outdir_type = "build"
class Watcher:
    """Watch the specified dirs for changes"""
    # Retained for reference; run() below watches content/ AND templates/.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")
    def __init__(self):
        self.observer = Observer()
    def run(self):
        """Run the builder on changes"""
        event_handler = Handler()
        # NOTE(review): `threads` just collects the same observer object
        # repeatedly and is never used — appears vestigial.
        threads = []
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for i in paths:
            targetPath = str(i)
            # One observer can watch several paths via multiple schedules.
            self.observer.schedule(event_handler, targetPath, recursive=True)
            threads.append(self.observer)
        self.observer.start()
        try:
            # Block until interrupted; the observer thread does the work.
            while True:
                time.sleep(5)
        except:  # NOTE(review): bare except — KeyboardInterrupt is the expected exit, but any error is swallowed too
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""
    @staticmethod
    def on_any_event(event):
        # Any file-level create/modify/delete triggers a full rebuild;
        # directory events and other event types are ignored.
        global outdir_type
        if event.is_directory:
            return None
        messages = {
            'created': "%s created",
            'modified': "%s modified",
            'deleted': "%s deleted",
        }
        template = messages.get(event.event_type)
        if template is not None:
            build_files(outdir_type)
            print(template % event.src_path)
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Record the output folder for the watchdog Handler's rebuilds.
    global outdir_type
    outdir_type = outdir
    if sys.version_info[0] < 3:
        # Python 2 only: force the UTF-8 default encoding. reload() and
        # sys.setdefaultencoding() do not exist on Python 3, so the
        # unguarded calls raised NameError there.
        reload(sys)  # noqa: F821
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    # Resolve the built home page and bail out early when it is missing.
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
# Entry point when the module is executed directly.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | import_blogger | python | def import_blogger(filepath):
print("\nBlended: Static Website Generator -\n")
checkConfig()
print("Importing from Blogger...")
blogger = parseXML(filepath)
wname = blogger.feed.title.cdata
aname = blogger.feed.author.name.cdata.strip()
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname, aname=aname)
for entry in blogger.feed.entry:
if "post" in entry.id.cdata:
with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
wfile.write(entry.content.cdata.strip())
print("\nYour website has been imported from Blogger.") | Imports A Blogger export and converts it to a Blended site | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L106-L129 | [
"def checkConfig():\n \"\"\"If the config.py file exists, back it up\"\"\"\n config_file_dir = os.path.join(cwd, \"config.py\")\n if os.path.exists(config_file_dir):\n print(\"Making a backup of your config file!\")\n config_file_dir2 = os.path.join(cwd, \"config.py.oldbak\")\n copyfil... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
# Resolve the installed Blended version once at import time; fall back to
# a placeholder when the package metadata is unavailable.
try:
    app_version = pkg_resources.require("blended")[0].version
    # Keep only the leading "X.Y" portion of the version string.
    app_version = app_version[:3]
except:  # NOTE(review): bare except also hides unrelated errors — consider pkg_resources.DistributionNotFound
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root of the CLI: every subcommand below registers itself on this group.
# (The docstring doubles as the --help text shown by click.)
@click.group()
def cli():
    """Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # app_version and cwd are resolved once at module import time.
    print("You are running Blended v" + app_version)
    print("The current working directory is " + cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    # Download <username>/<repo> as an archive and unpack it into
    # ./templates (getunzipped handles the fetch and extraction).
    print("Installing template from " + username + "/" + repo)
    dpath = os.path.join(cwd, "templates")
    getunzipped(username, repo, dpath)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    # Back up any existing config.py before it gets overwritten.
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WordPress (WXR/RSS) export into an object tree.
    wp = parseXML(filepath)
    # Pull the site metadata out of the <channel> element.
    wname = wp.rss.channel.title.cdata
    wdesc = wp.rss.channel.description.cdata
    wlan = wp.rss.channel.language.cdata
    wurl = wp.rss.channel.link.cdata
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # Each <item> becomes one HTML content file named after its title.
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # NOTE(review): pip.main() is a private API removed in pip >= 10, and
    # GitHub no longer serves the git:// protocol — consider invoking pip
    # via subprocess with an https:// URL instead.
    pip.main(['install', '-U', "git+git://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    # Back up any existing config.py before regenerating it.
    checkConfig()
    # Prompt with input() on Python 3 and raw_input() on Python 2.
    if (sys.version_info > (3, 0)):
        wname = input("Website Name: ")
        wdesc = input("Website Description: ")
        wlan = input("Website Language: ")
        wlic = input("Website License: ")
        aname = input("Author(s) Name(s): ")
    else:
        wname = raw_input("Website Name: ")
        wdesc = raw_input("Website Description: ")
        wlan = raw_input("Website Language: ")
        wlic = raw_input("Website License: ")
        aname = raw_input("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Upload the built files to FTP"""
    # Walk the local directory; the config files and the source folders
    # (templates/, content/) are never uploaded.
    for name in os.listdir(path):
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                # NOTE(review): this file handle is never closed.
                ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists" (550 reply)
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                # Recurse, then climb back up so the remote cursor stays
                # in sync with the local walk.
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
    # Pull the FTP credentials from config.py in the working directory.
    try:
        from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
    except:
        sys.exit(
            "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    server = ftp_server
    username = ftp_username
    password = ftp_password
    port = ftp_port
    ftp = FTP()
    ftp.connect(server, port)
    ftp.login(username, password)
    filenameCV = os.path.join(cwd, outdir)
    try:
        ftp.cwd(ftp_upload_path)
        placeFiles(ftp, filenameCV)
    except:
        # NOTE(review): this broad except also swallows permission and
        # transfer errors raised by placeFiles, so the message below can
        # be misleading about the real cause.
        ftp.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
    ftp.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Remove the build folder; a missing folder simply means nothing to do.
    build_dir = os.path.join(cwd, outdir)
    if os.path.exists(build_dir):
        # shutil.rmtree deletes the directory and everything beneath it.
        shutil.rmtree(build_dir)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    # The site name from config.py is used in the archive's filename.
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
    try:
        from config import website_name
    except:  # NOTE(review): bare except hides more than a missing setting
        sys.exit(
            "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Archive the build folder as <site>-build-<date>.zip next to it.
    build_dir = os.path.join(cwd, outdir)
    zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                           str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(zip_dir, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended

    Deletes the ``templates``, ``content`` and ``build`` directories and the
    generated configuration files (``config.py``, ``config.pyc`` and the
    ``config.py.oldbak`` backup).  Anything that does not exist is skipped.
    """
    print("Purging the Blended files!")
    # Generated directory trees.
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated config files, including the byte-compiled and backup copies.
    # (The original code repeated this stanza six times with copy-pasted,
    # partly wrong comments.)
    for config_name in ("config.py", "config.pyc", "config.py.oldbak"):
        config_path = os.path.join(cwd, config_name)
        if os.path.exists(config_path):
            os.remove(config_path)
def convert_text(filename):
    """Convert the post/page content using the converters

    Dispatches on the extension found in *filename* (Markdown, docx, Textile,
    Jade, reStructuredText, HTML or plain text) and returns the converted
    HTML followed by two newlines, or ``"NULL\\n\\n"`` for unsupported types.
    """
    # NOTE: matching with ``in`` rather than ``endswith`` is kept for
    # backward compatibility (e.g. "page.md.bak" still converts as Markdown).
    # BUGFIX: the handle was previously never closed; the ``with`` block
    # guarantees it is released on every code path.
    with open(filename, "r") as text_content:
        if ".md" in filename:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif ".docx" in filename:
            # mammoth needs a binary handle, so the file is reopened in "rb".
            # ``os.path.join`` returns *filename* unchanged when callers pass
            # an absolute path (which build_files does) -- TODO confirm no
            # caller passes a bare relative name.
            with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
                result = mammoth.convert_to_html(docx_file)
            final_docx_html = result.value
            text_cont1 = "\n" + final_docx_html + "\n"
        elif ".tile" in filename:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif ".jade" in filename:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif ".rst" in filename:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif ".html" in filename:
            text_cont1 = text_content.read()
        elif ".txt" in filename:
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the whole website into *outdir*.

    Reads ``config.py``, renders every file under ``content/`` through the
    templates in ``templates/``, expands the ``{...}`` template variables in
    every built page, then copies and compiles the asset pipeline
    (Sass/Less/Stylus/CoffeeScript plus optional CSS/JS minification).
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            # Optional settings: fall back to harmless defaults for old configs.
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder (the dot checks guard against wiping the
    # current/parent directory when outdir is a relative path like ".").
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing; a page_list_item.html template overrides
    # the default <ul> rendering.
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders with "_" in the name are treated as drafts/private.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Render every (non-draft) content file: header + converted body + footer.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line selects a per-page
                # template named on the first line.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop!  fileinput with inplace=1 redirects stdout into
                # the file, so every print() below rewrites the current line.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # BUGFIX: the old check ("< 2" / "< 3") never
                            # bound ``main`` on Python 3, so any plugin use
                            # crashed with a NameError.  Use importlib on
                            # Python 3 and __import__ on Python 2.
                            if sys.version_info[0] >= 3:
                                main = importlib.import_module(plugins[i])
                            else:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes "../" repeated once per level of
                    # nesting below the build root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # BUGFIX: was ``subprocess.call[...]`` -- that
                                # indexes the function object and always raised
                                # TypeError instead of invoking npm.
                                subprocess.call(["npm", "install", "-g", "stylus"])
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # Python 2 only: force UTF-8 as the default codec before building.
    # ``reload``/``setdefaultencoding`` do not exist like this on Python 3 --
    # TODO confirm this command is still expected to run under Python 2.
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Module-level output directory shared with the watchdog Handler below; the
# `interactive` command overwrites it with the user's --outdir so rebuilds
# triggered by file changes land in the right place.
outdir_type = "build"
class Watcher:
    """Watch the specified dirs for changes"""

    # Kept for backward compatibility; ``run`` schedules both content/ and
    # templates/ explicitly rather than reading this attribute.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes

        Blocks until interrupted with CTRL+C, rebuilding the site (via
        ``Handler``) whenever a file under ``content/`` or ``templates/``
        changes.
        """
        event_handler = Handler()
        threads = []
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for i in paths:
            targetPath = str(i)
            self.observer.schedule(event_handler, targetPath, recursive=True)
            threads.append(self.observer)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        # BUGFIX: only the user's CTRL+C should stop the watcher; the old bare
        # ``except:`` also silently swallowed every other error.
        except KeyboardInterrupt:
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""

    @staticmethod
    def on_any_event(event):
        """Rebuild the whole site for any file-level create/modify/delete."""
        global outdir_type
        # Directory events are noise -- only react to file changes.
        if event.is_directory:
            return None
        if event.event_type == 'created':
            # A new source file appeared: rebuild everything.
            build_files(outdir_type)
            print("%s created" % event.src_path)
        elif event.event_type == 'modified':
            # An existing file changed: rebuild everything.
            build_files(outdir_type)
            print("%s modified" % event.src_path)
        elif event.event_type == 'deleted':
            # A file disappeared: rebuild so its page is dropped.
            build_files(outdir_type)
            print("%s deleted" % event.src_path)
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Publish the chosen output dir for the watchdog Handler, which rebuilds
    # into this directory on every change event.
    global outdir_type
    outdir_type = outdir
    # Python 2 only: force UTF-8 as the default codec before building.
    # ``reload``/``setdefaultencoding`` do not exist like this on Python 3 --
    # TODO confirm Python 2 is still the supported runtime here.
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    # Resolve the built landing page and hand it to the default browser.
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
    else:
        webbrowser.open('file://' + index_path)
# Allow direct execution (``python -m blended`` / ``python __main__.py``) to
# start the click CLI.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | install_plugin | python | def install_plugin(username, repo):
print("Installing plugin from " + username + "/" + repo)
pip.main(['install', '-U', "git+git://github.com/" +
username + "/" + repo + ".git"]) | Installs a Blended plugin from GitHub | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L137-L142 | null | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
# Resolve the installed package version, truncated to "X.Y"; fall back to a
# sentinel when Blended is not installed as a distribution (e.g. run from a
# source checkout, where pkg_resources cannot find it).
try:
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]
except:
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root click command group; every sub-command below registers onto it via
# ``@cli.command(...)``.
@click.group()
def cli():
    """Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Report the running Blended version and the active working directory."""
    for info_line in ("You are running Blended v" + app_version,
                      "The current working directory is " + cwd):
        print(info_line)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    # Download <username>/<repo> and unpack it into the local templates folder.
    print("Installing template from " + username + "/" + repo)
    getunzipped(username, repo, os.path.join(cwd, "templates"))
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WXR export and pull the site-level metadata off the channel.
    export = parseXML(filepath)
    channel = export.rss.channel
    site_name = channel.title.cdata
    site_desc = channel.description.cdata
    site_lang = channel.language.cdata
    site_url = channel.link.cdata
    author = channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_name, wdesc=site_desc,
                 wlan=site_lang, wurl=site_url, aname=author)
    # Every exported item becomes one HTML content file named after its title.
    for item in channel.item:
        target = os.path.join(cwd, "content",
                              item.title.cdata.replace(" ", "_") + ".html")
        with open(target, 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    # Parse the Atom export and read the site metadata off the feed element.
    feed = parseXML(filepath).feed
    site_name = feed.title.cdata
    author = feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_name, aname=author)
    # Blogger exports mix posts, comments and settings; only entries whose id
    # marks them as posts are written out as content files.
    for entry in feed.entry:
        if "post" in entry.id.cdata:
            target = os.path.join(cwd, "content",
                                  entry.title.cdata.replace(" ", "_") + ".html")
            with open(target, 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    # BUGFIX: these decorators were left dangling (the function body had been
    # stripped out), so they wrongly stacked onto the next command.  Restored
    # from the original definition.  Plugins are plain pip-installable repos;
    # -U upgrades an already-installed copy.
    print("Installing plugin from " + username + "/" + repo)
    pip.main(['install', '-U', "git+git://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # Pick the prompt function once: ``input`` on Python 3, ``raw_input`` on
    # Python 2 (both return a plain string).
    if sys.version_info > (3, 0):
        ask = input
    else:
        ask = raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload everything under *path* into the server's current
    FTP directory, skipping Blended's source folders and config files."""
    skipped = ("config.py", "config.pyc", "templates", "content")
    for name in os.listdir(path):
        if name in skipped:
            continue
        localpath = os.path.join(path, name)
        if os.path.isfile(localpath):
            print("STOR", name, localpath)
            ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
        elif os.path.isdir(localpath):
            print("MKD", name)
            try:
                ftp.mkd(name)
            except error_perm as e:
                # 550 means the remote directory already exists -- fine.
                if not e.args[0].startswith('550'):
                    raise
            # Descend into the directory, mirror it, then come back up.
            print("CWD", name)
            ftp.cwd(name)
            placeFiles(ftp, localpath)
            print("CWD", "..")
            ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP

    Reads the server credentials from ``config.py``, connects and logs in,
    changes into the configured upload path and mirrors *outdir* there via
    ``placeFiles``.
    """
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        # Only a missing/incomplete config module warrants this hint; the old
        # bare ``except:`` also swallowed SystemExit and KeyboardInterrupt.
        except ImportError:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        filenameCV = os.path.join(cwd, outdir)
        try:
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        except Exception:
            # Any upload failure (missing remote dir, permissions, network)
            # ends the session cleanly before reporting the error.
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Wipe the whole output tree if a previous build left one behind.
    built_path = os.path.join(cwd, outdir)
    if os.path.exists(built_path):
        shutil.rmtree(built_path)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip

    Produces ``<website_name>-build-<YYYY-MM-DD>.zip`` in the project root
    from the contents of *outdir*.  Requires an initialised project
    (``config.py`` must exist and define ``website_name``).
    """
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        # Only a missing/outdated config module warrants this hint; the old
        # bare ``except:`` also swallowed SystemExit and KeyboardInterrupt.
        except ImportError:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Archive name: "<site>-build-<date>" (spaces replaced for portability).
        build_dir = os.path.join(cwd, outdir)
        zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                               str(datetime.now().date()))
        if os.path.exists(build_dir):
            shutil.make_archive(zip_dir, 'zip', build_dir)
        else:
            print("The " + outdir +
                  "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended

    Deletes the ``templates``, ``content`` and ``build`` directories and the
    generated configuration files (``config.py``, ``config.pyc`` and the
    ``config.py.oldbak`` backup).  Anything that does not exist is skipped.
    """
    print("Purging the Blended files!")
    # Generated directory trees.
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated config files, including the byte-compiled and backup copies.
    # (The original code repeated this stanza six times with copy-pasted,
    # partly wrong comments.)
    for config_name in ("config.py", "config.pyc", "config.py.oldbak"):
        config_path = os.path.join(cwd, config_name)
        if os.path.exists(config_path):
            os.remove(config_path)
def convert_text(filename):
    """Convert the post/page content using the converters

    Dispatches on the extension found in *filename* (Markdown, docx, Textile,
    Jade, reStructuredText, HTML or plain text) and returns the converted
    HTML followed by two newlines, or ``"NULL\\n\\n"`` for unsupported types.
    """
    # NOTE: matching with ``in`` rather than ``endswith`` is kept for
    # backward compatibility (e.g. "page.md.bak" still converts as Markdown).
    # BUGFIX: the handle was previously never closed; the ``with`` block
    # guarantees it is released on every code path.
    with open(filename, "r") as text_content:
        if ".md" in filename:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif ".docx" in filename:
            # mammoth needs a binary handle, so the file is reopened in "rb".
            # ``os.path.join`` returns *filename* unchanged when callers pass
            # an absolute path (which build_files does) -- TODO confirm no
            # caller passes a bare relative name.
            with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
                result = mammoth.convert_to_html(docx_file)
            final_docx_html = result.value
            text_cont1 = "\n" + final_docx_html + "\n"
        elif ".tile" in filename:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif ".jade" in filename:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif ".rst" in filename:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif ".html" in filename:
            text_cont1 = text_content.read()
        elif ".txt" in filename:
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the files!

    Reads ``config.py`` from the working directory, converts every file in
    ``content/`` through the templates in ``templates/``, substitutes the
    ``{...}`` template variables in place, and writes the finished site
    (plus compiled/minified assets) into *outdir*.

    :param outdir: folder name (relative to the working directory) to build into
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
    # Crucial settings: missing any of these aborts the build.
    try:
        from config import website_name, website_description, website_language, home_page_list
    except:
        sys.exit(
            "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Optional settings fall back to harmless defaults with a warning.
    try:
        from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
    except:
        website_description_long = ""
        website_license = ""
        website_url = ""
        author_name = ""
        author_bio = ""
        plugins = []
        custom_variables = {}
        minify_css = False
        minify_js = False
        print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder
    build_dir = os.path.join(cwd, outdir)
    # NOTE(review): any dot in outdir skips the wipe — presumably a guard
    # against deleting "." / ".." style targets; confirm intent.
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        # No custom list-item template: emit a default <ul> listing.
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        # Custom list-item template found: expand it once per content file.
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders whose name contains "_" are treated as private.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Convert every non-private content file into a finished HTML page.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                # Re-open header/footer each pass: the previous read
                # exhausted the handles.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line selects a custom page
                # template named on the first line of the content.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    # Template/asset folders must not be rewritten by the variable pass.
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop! fileinput with inplace=1 redirects print() back
                # into the file being scanned, rewriting it line by line.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # NOTE(review): version_info[0] < 2 is never
                            # true, so on Python 3 `main` is never bound
                            # here — looks like a broken version check;
                            # confirm before relying on plugins under py3.
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes one "../" per directory level
                    # below the build root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
    # Compile (Sass/Less/Styl/CoffeeScript) and optionally minify assets
    # in place inside the build folder; "_"-prefixed files are skipped.
    for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
        for file in files:
            if not file.startswith("_"):
                if (file.endswith(".sass")) or (file.endswith(".scss")):
                    sass_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if sass_text != "":
                        text_file.write(sass.compile(string=sass_text))
                    else:
                        print(file + " is empty! Not compiling Sass.")
                    text_file.close()
                if file.endswith(".less"):
                    less_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if less_text != "":
                        text_file.write(lesscpy.compile(StringIO(less_text)))
                    else:
                        print(file + " is empty! Not compiling Less.")
                    text_file.close()
                if file.endswith(".styl"):
                    try:
                        styl_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if styl_text != "":
                            text_file.write(Stylus().compile(styl_text))
                        else:
                            print(file + " is empty! Not compiling Styl.")
                        text_file.close()
                    except:
                        print("Not able to build with Stylus! Is it installed?")
                        try:
                            # NOTE(review): subprocess.call[...] indexes the
                            # function instead of calling it, so this raises
                            # TypeError (caught below); it should probably be
                            # subprocess.call([...]). Left as-is here.
                            subprocess.call["npm", "install", "-g", "stylus"]
                        except:
                            print("NPM (NodeJS) not working. Is it installed?")
                if file.endswith(".coffee"):
                    coffee_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                    if coffee_text != "":
                        text_file.write(coffeescript.compile(coffee_text))
                    else:
                        print(file + " is empty! Not compiling CoffeeScript.")
                    text_file.close()
                if minify_css:
                    if file.endswith(".css"):
                        css_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if css_text != "":
                            text_file.write(cssmin(css_text))
                        text_file.close()
                if minify_js:
                    if file.endswith(".js"):
                        js_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if js_text != "":
                            text_file.write(jsmin(js_text))
                        text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # Python 2 era hack so unicode content survives string handling.
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    done_message = ("The files are built! You can find them in the " + outdir +
                    "/ directory. Run the view command to see what you have created in a web browser.")
    print(done_message)
# Output directory shared with the watchdog Handler; the `interactive`
# command overwrites this with the user's --outdir choice before watching.
outdir_type = "build"
class Watcher:
    """Watch the content and template directories for changes.

    ``run()`` blocks, rebuilding the site (via :class:`Handler`) whenever
    a watched file changes, until interrupted.
    """

    # Primary watched directory; templates/ is added alongside it in run().
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes"""
        event_handler = Handler()
        # Fix: the original appended the same observer to an unused
        # `threads` list on every iteration and wrapped the already-str
        # path in str(); schedule the two watch roots directly instead.
        for watch_path in (os.path.join(cwd, "content"), os.path.join(cwd, "templates")):
            self.observer.schedule(event_handler, watch_path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except:  # deliberately broad: Ctrl+C (or any error) stops the watch
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""

    @staticmethod
    def on_any_event(event):
        """Rebuild the site whenever a watched file is created, modified
        or deleted; directory events and other event types are ignored.
        """
        global outdir_type
        if event.is_directory:
            return None
        # The original's three branches were identical apart from the
        # word printed; fold them into a single rebuild. The printed text
        # ("<path> created" etc.) is unchanged.
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Remember the chosen output directory so the watchdog Handler
    # rebuilds into the same place.
    global outdir_type
    outdir_type = outdir
    # Python 2 era hack so unicode content survives string handling.
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    Watcher().run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    built_index = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    # Guard clause: nothing to show if the site was never built (or was
    # built without a home page).
    if not os.path.exists(built_index):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + built_index)
# Entry point when the module is executed directly (e.g. `python -m blended`).
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | init | python | def init():
print("Blended: Static Website Generator -\n")
checkConfig()
if (sys.version_info > (3, 0)):
wname = input("Website Name: ")
wdesc = input("Website Description: ")
wlan = input("Website Language: ")
wlic = input("Website License: ")
aname = input("Author(s) Name(s): ")
else:
wname = raw_input("Website Name: ")
wdesc = raw_input("Website Description: ")
wlan = raw_input("Website Language: ")
wlic = raw_input("Website License: ")
aname = raw_input("Author(s) Name(s): ")
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname,
wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
print("\nThe required files for your website have been generated.") | Initiates a new website | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L146-L172 | [
"def checkConfig():\n \"\"\"If the config.py file exists, back it up\"\"\"\n config_file_dir = os.path.join(cwd, \"config.py\")\n if os.path.exists(config_file_dir):\n print(\"Making a backup of your config file!\")\n config_file_dir2 = os.path.join(cwd, \"config.py.oldbak\")\n copyfil... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
# — every path Blended reads or writes is resolved relative to this.
cwd = os.getcwd()
# Resolve the installed Blended version from package metadata; fall back
# to a sentinel when running from a source tree that was never installed.
try:
    app_version = pkg_resources.require("blended")[0].version
    # NOTE(review): slicing to 3 chars assumes "X.Y" style versions; a
    # version like "1.10" would be truncated to "1.1" — confirm intended.
    app_version = app_version[:3]
except:
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root Click command group; every `blended <subcommand>` hangs off this.
@click.group()
def cli():
    """Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Report the tool version and the directory commands operate on.
    info_lines = ("You are running Blended v" + app_version,
                  "The current working directory is " + cwd)
    for info_line in info_lines:
        print(info_line)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    print("Installing template from " + username + "/" + repo)
    # Download and unpack the repository archive into templates/.
    getunzipped(username, repo, os.path.join(cwd, "templates"))
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    channel = parseXML(filepath).rss.channel
    createBlendedFolders()
    # Populate the configuration file straight from the export metadata.
    createConfig(app_version=app_version,
                 wname=channel.title.cdata,
                 wdesc=channel.description.cdata,
                 wlan=channel.language.cdata,
                 wurl=channel.link.cdata,
                 aname=channel.wp_author.wp_author_display_name.cdata.strip())
    # One HTML content file per exported item.
    for item in channel.item:
        post_path = os.path.join(
            cwd, "content", item.title.cdata.replace(" ", "_") + ".html")
        with open(post_path, 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    feed = parseXML(filepath).feed
    createBlendedFolders()
    # Populate the configuration file straight from the feed metadata.
    createConfig(app_version=app_version,
                 wname=feed.title.cdata,
                 aname=feed.author.name.cdata.strip())
    # Blogger exports mix posts, pages and settings; only entries whose
    # id marks them as posts become content files.
    for entry in feed.entry:
        if "post" in entry.id.cdata:
            post_path = os.path.join(
                cwd, "content", entry.title.cdata.replace(" ", "_") + ".html")
            with open(post_path, 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # Plugins are plain pip-installable packages fetched from GitHub.
    plugin_url = "git+git://github.com/" + username + "/" + repo + ".git"
    pip.main(['install', '-U', plugin_url])
@cli.command('init', short_help='Initiate a new website')
def placeFiles(ftp, path):
    """Recursively upload the built site at *path* over an open FTP session.

    Blended's own working files (``config.py``/``config.pyc`` and the
    ``templates``/``content`` source folders) are never uploaded. Regular
    files are stored with STOR; directories are created (ignoring the
    "already exists" 550 reply), entered, uploaded into, and left again.

    Fix over the original: the local file handle passed to ``storbinary``
    is now closed via a context manager instead of being leaked, one
    descriptor per uploaded file.

    :param ftp: a connected ``ftplib.FTP``-like object
    :param path: local directory whose contents should be mirrored
    :raises ftplib.error_perm: for MKD failures other than 550
    """
    for name in os.listdir(path):
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                with open(localpath, 'rb') as upload:
                    ftp.storbinary('STOR ' + name, upload)
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists"
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP

    Reads the FTP credentials from config.py in the working directory,
    connects, changes to the configured remote upload path, and mirrors
    the built site there via placeFiles().
    """
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the user's project dir, so it must be put on
        # sys.path before the import below can succeed.
        sys.path.insert(0, cwd)
    try:
        from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
    except:
        sys.exit(
            "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    server = ftp_server
    username = ftp_username
    password = ftp_password
    port = ftp_port
    ftp = FTP()
    ftp.connect(server, port)
    ftp.login(username, password)
    filenameCV = os.path.join(cwd, outdir)
    try:
        ftp.cwd(ftp_upload_path)
        placeFiles(ftp, filenameCV)
    except:
        # Any failure (bad remote path, transfer error) closes the session
        # and aborts with a message rather than leaving it half-uploaded.
        ftp.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
    ftp.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the output directory (if it exists); sources are untouched.
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    # config.py (for website_name) must exist in the working directory.
    if not os.path.exists(os.path.join(cwd, "config.py")):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import website_name
    except:
        sys.exit(
            "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    source_dir = os.path.join(cwd, outdir)
    if not os.path.exists(source_dir):
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
        return
    # Archive name: <Website_Name>-build-<date>.zip in the working dir.
    archive_base = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                                str(datetime.now().date()))
    shutil.make_archive(archive_base, 'zip', source_dir)
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended

    Deletes the templates/, content/ and build/ directories plus the
    configuration file and its compiled/backup copies, in the same order
    as before. Missing entries are silently skipped.
    """
    print("Purging the Blended files!")
    # The original repeated the same exists-then-delete stanza six times;
    # drive it from two small tables instead.
    for dirname in ("templates", "content", "build"):
        target = os.path.join(cwd, dirname)
        if os.path.exists(target):
            shutil.rmtree(target)
    # Configuration files, including the compiled and backup copies.
    for conf_name in ("config.py", "config.pyc", "config.py.oldbak"):
        target = os.path.join(cwd, conf_name)
        if os.path.exists(target):
            os.remove(target)
def convert_text(filename):
    """Convert a post/page source file to an HTML fragment.

    The converter is chosen from the file name (Markdown, docx, Textile,
    Jade, reStructuredText, raw HTML, or plain text); unrecognised types
    yield the string "NULL". The result is returned followed by two
    newlines so it can be spliced between the header and footer templates.

    Fixes over the original: the source file is now read under a context
    manager (the original leaked an open handle on every call, including
    an unused text-mode handle in the docx branch), and the docx branch
    opens *filename* directly — every call site already passes a full
    path, so the original os.path.join(cwd, "content", filename) was a
    no-op.

    :param filename: path of the content file to convert
    :return: converted HTML (or raw text) plus a trailing blank line
    """
    if ".md" in filename:
        with open(filename, "r") as src:
            converted = "\n" + markdown.markdown(src.read()) + "\n"
    elif ".docx" in filename:
        # docx is a binary container, so it is handled by mammoth.
        with open(filename, "rb") as docx_file:
            converted = "\n" + mammoth.convert_to_html(docx_file).value + "\n"
    elif ".tile" in filename:
        with open(filename, "r") as src:
            converted = "\n" + textile.textile(src.read()) + "\n"
    elif ".jade" in filename:
        with open(filename, "r") as src:
            converted = "\n" + pyjade.simple_convert(src.read()) + "\n"
    elif ".rst" in filename:
        with open(filename, "r") as src:
            converted = "\n" + \
                publish_parts(src.read(), writer_name='html')['html_body'] + "\n"
    elif ".html" in filename or ".txt" in filename:
        # Already HTML / plain text: pass the content through unchanged.
        with open(filename, "r") as src:
            converted = src.read()
    else:
        print(filename + " is not a valid file type!")
        converted = "NULL"
    return converted + "\n\n"
def build_files(outdir):
    """Build the website into *outdir*.

    Loads config.py from the project directory, renders every file under
    content/ through the templates in templates/, expands the template
    placeholders ({website_name}, {page_list}, {nav...}, plugin tags, ...)
    in the generated pages, then copies and compiles the assets
    (Sass/Less/Stylus/CoffeeScript, optional CSS/JS minification).

    Exits the process with a message when config.py or the mandatory
    header/footer templates are missing.

    :param outdir: folder (relative to the project root) to build into
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import website_name, website_description, website_language, home_page_list
    except ImportError:
        sys.exit(
            "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    try:
        from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
    except ImportError:
        # Older config files miss the optional values; fall back to
        # harmless defaults instead of crashing.
        website_description_long = ""
        website_license = ""
        website_url = ""
        author_name = ""
        author_bio = ""
        plugins = []
        custom_variables = {}
        minify_css = False
        minify_js = False
        print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder.  Refuse any outdir containing "." so a user
    # cannot point the build at "." or ".." and wipe the project (the old
    # chain of "." / ".." / "..." checks reduced to exactly this test).
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        # No custom item template: emit a plain <ul> of links.
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        # Custom item template: substitute its placeholders per page.
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders with "_" in the name are skipped (drafts/partials)
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    # Only write an index page when the user asked for one.  The config
    # value may be "yes"/"no" or a boolean; the old `or home_page_list`
    # test was truthy for the string "no" and built the page anyway.
    if home_page_list == "yes" or home_page_list is True:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            with open(home_templ_dir, "r") as home_templ_file:
                home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # The shared header/footer handles are exhausted now; fresh ones are
    # opened per page below.
    header_file.close()
    footer_file.close()
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line means the first line
                # names a custom page template.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto
                # the build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    with open(content_templ_dir, "r") as content_templ_file:
                        content_templ = content_templ_file.read()
                    currents_working_file.write(
                        content_templ.replace("{page_content}", text_cont1))
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the handles used for this page
                currents_working_file.close()
                header_file.close()
                footer_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # read their content into the dict for {nav...} substitution
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            with open(os.path.join(cwd, "templates", file), "r") as nav_cont:
                navs[file.replace(".html", "")] = nav_cont.read()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop! fileinput with inplace=1 redirects stdout into
                # the file, so the print() at the bottom rewrites each line.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    for plugin_name in plugins:
                        # importlib.import_module works on Python 2.7 and
                        # 3.x alike; the old version-gated branches left the
                        # module unbound on Python 3.  Repeated imports are
                        # cheap because the module is cached after the first.
                        plugin_module = importlib.import_module(plugin_name)
                        line = line.replace(
                            "{" + plugin_name + "}", plugin_module.main())
                    if "{nav" in line:
                        # Extract the token that actually starts with "nav";
                        # the old code grabbed the first braced token on the
                        # line, which raised KeyError when another
                        # placeholder preceded the nav tag.
                        navname = "nav" + \
                            line.split("{nav")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[navname])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes "../" repeated once per
                    # directory level below the build root
                    depth = root.count(os.sep) - \
                        os.path.join(cwd, outdir).count(os.sep)
                    line = line.replace("{relative_root}", "../" * depth)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
    # Compile/minify whatever ended up in the build assets folder
    # (os.walk on a missing folder simply yields nothing)
    for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
        for file in files:
            if not file.startswith("_"):
                if (file.endswith(".sass")) or (file.endswith(".scss")):
                    sass_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if sass_text != "":
                        text_file.write(sass.compile(string=sass_text))
                    else:
                        print(file + " is empty! Not compiling Sass.")
                    text_file.close()
                if file.endswith(".less"):
                    less_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if less_text != "":
                        text_file.write(lesscpy.compile(StringIO(less_text)))
                    else:
                        print(file + " is empty! Not compiling Less.")
                    text_file.close()
                if file.endswith(".styl"):
                    try:
                        styl_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if styl_text != "":
                            text_file.write(Stylus().compile(styl_text))
                        else:
                            print(file + " is empty! Not compiling Styl.")
                        text_file.close()
                    except:
                        print("Not able to build with Stylus! Is it installed?")
                        try:
                            # BUG FIX: `subprocess.call[...]` indexed the
                            # function instead of calling it, so the npm
                            # fallback never ran.
                            subprocess.call(
                                ["npm", "install", "-g", "stylus"])
                        except:
                            print("NPM (NodeJS) not working. Is it installed?")
                if file.endswith(".coffee"):
                    coffee_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                    if coffee_text != "":
                        text_file.write(coffeescript.compile(coffee_text))
                    else:
                        print(file + " is empty! Not compiling CoffeeScript.")
                    text_file.close()
                if minify_css:
                    if file.endswith(".css"):
                        css_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if css_text != "":
                            text_file.write(cssmin(css_text))
                        text_file.close()
                if minify_js:
                    if file.endswith(".js"):
                        js_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if js_text != "":
                            text_file.write(jsmin(js_text))
                        text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    if sys.version_info[0] < 3:
        # Python 2 only: force UTF-8 as the default string encoding.
        # reload() and sys.setdefaultencoding() do not exist on Python 3
        # (the unguarded calls crashed there), and UTF-8 is already the
        # default on 3.x.
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ dirs and rebuild on changes."""

    # Kept for backwards compatibility; run() actually watches both
    # content/ and templates/.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes until CTRL+C is pressed."""
        event_handler = Handler()
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for targetPath in paths:
            self.observer.schedule(
                event_handler, str(targetPath), recursive=True)
        self.observer.start()
        try:
            # Idle loop: the observer thread does the actual work.
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            # Only CTRL+C should stop the watch loop; the old bare
            # `except:` also hid genuine errors.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Watchdog callback that rebuilds the site on any file change."""

    @staticmethod
    def on_any_event(event):
        """Rebuild into the global outdir on create/modify/delete."""
        if event.is_directory:
            return None
        if event.event_type in ('created', 'modified', 'deleted'):
            # One rebuild per event, then report which file triggered it.
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Publish the chosen output folder to the watchdog Handler callbacks
    global outdir_type
    outdir_type = outdir
    if sys.version_info[0] < 3:
        # Python 2 only: reload()/sys.setdefaultencoding() no longer exist
        # on Python 3 (the unguarded calls crashed there).
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    # Guard clause: nothing to show if the site was never built (or was
    # built without an index page).
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
# Entry point when the module is executed directly (python __main__.py /
# python -m blended): hand control to the click command group.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | placeFiles | python | def placeFiles(ftp, path):
for name in os.listdir(path):
if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
localpath = os.path.join(path, name)
if os.path.isfile(localpath):
print("STOR", name, localpath)
ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
elif os.path.isdir(localpath):
print("MKD", name)
try:
ftp.mkd(name)
# ignore "directory already exists"
except error_perm as e:
if not e.args[0].startswith('550'):
raise
print("CWD", name)
ftp.cwd(name)
placeFiles(ftp, localpath)
print("CWD", "..")
ftp.cwd("..") | Upload the built files to FTP | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L175-L198 | null | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
try:
    app_version = pkg_resources.require("blended")[0].version
    # Keep only the first three characters, i.e. "X.Y" for single-digit
    # major/minor versions -- presumably breaks for versions like "10.1";
    # TODO confirm.
    app_version = app_version[:3]
except:
    # Blended is not installed as a distribution (e.g. running from a
    # source checkout), so no version metadata is available.
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root click command group; every `blended <subcommand>` hangs off this.
# (The docstring doubles as the CLI help text, so it is left unchanged.)
@click.group()
def cli():
    """Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Report the installed version and where commands will operate.
    for message in ("You are running Blended v" + app_version,
                    "The current working directory is " + cwd):
        print(message)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    print("Installing template from " + username + "/" + repo)
    # Templates are downloaded and unpacked into the project's
    # templates/ directory.
    destination = os.path.join(cwd, "templates")
    getunzipped(username, repo, destination)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WXR (WordPress eXtended RSS) export file
    wp = parseXML(filepath)
    # Site-level metadata lives under rss/channel
    wname = wp.rss.channel.title.cdata
    wdesc = wp.rss.channel.description.cdata
    wlan = wp.rss.channel.language.cdata
    wurl = wp.rss.channel.link.cdata
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # One content/ HTML file per exported item; spaces in the title
    # become underscores in the file name.
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    # Parse the Atom export file produced by Blogger
    blogger = parseXML(filepath)
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    # Blogger exports mix several entry kinds in <entry>; only entries
    # whose id mentions "post" are written out (presumably blog posts --
    # TODO confirm against a real export).
    for entry in blogger.feed.entry:
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # pip.main() was removed in pip 10, so drive pip through the current
    # interpreter instead.  GitHub also shut down the git:// protocol,
    # hence the https URL.
    subprocess.call([sys.executable, "-m", "pip", "install", "-U",
                     "git+https://github.com/" + username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # raw_input() is the Python 2 spelling of input(); pick once instead
    # of duplicating every prompt in two branches.
    ask = input if (sys.version_info > (3, 0)) else raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
    except ImportError:
        sys.exit(
            "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    ftp = FTP()
    ftp.connect(ftp_server, ftp_port)
    ftp.login(ftp_username, ftp_password)
    local_build_dir = os.path.join(cwd, outdir)
    try:
        ftp.cwd(ftp_upload_path)
        placeFiles(ftp, local_build_dir)
    except Exception as upload_err:
        # Close the connection before bailing out, but no longer hide the
        # actual FTP error behind a generic message (the old bare except
        # also swallowed CTRL+C).
        ftp.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists? (" + str(upload_err) + ")")
    ftp.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the output directory left behind by a previous build, if any
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    # A config file is required because the archive is named after the site
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import website_name
    except:
        sys.exit(
            "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Archive name: <site>-build-<date>.zip written next to the project root
    build_dir = os.path.join(cwd, outdir)
    archive_base = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                                str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(archive_base, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended"""
    print("Purging the Blended files!")
    # Wipe every folder Blended ever created (same order as before:
    # templates, content, build)...
    for folder_name in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder_name)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # ...and every generated configuration file, including the compiled
    # and backup copies.
    for file_name in ("config.py", "config.pyc", "config.py.oldbak"):
        file_path = os.path.join(cwd, file_name)
        if os.path.exists(file_path):
            os.remove(file_path)
def convert_text(filename):
    """Convert a post/page source file to an HTML fragment.

    The converter is chosen from the file extension: Markdown (.md),
    Word (.docx, via mammoth), Textile (.tile), Jade (.jade),
    reStructuredText (.rst); .html and .txt are passed through verbatim.

    :param filename: full path of the content file to convert
    :return: the converted markup followed by two newlines, or
             "NULL\n\n" for an unsupported extension
    """
    # Word documents must be read as raw bytes, not decoded text.
    # (The old code opened a text handle first and also re-joined the
    # already-absolute path under content/ -- both fixed here.)
    if filename.endswith(".docx"):
        with open(filename, "rb") as docx_file:
            result = mammoth.convert_to_html(docx_file)
        return "\n" + result.value + "\n" + "\n\n"
    # endswith() instead of substring matching so e.g. "notes.md.bak"
    # or a directory name containing ".md" can no longer misfire.
    with open(filename, "r") as text_content:
        if filename.endswith(".md"):
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif filename.endswith(".tile"):
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif filename.endswith(".jade"):
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif filename.endswith(".rst"):
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif filename.endswith(".html") or filename.endswith(".txt"):
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the site.

    Converts everything under content/ with the templates in templates/,
    writes the finished pages into *outdir*, expands the {placeholder}
    variables, then copies and compiles the asset pipeline.

    :param outdir: output folder name, relative to the working directory
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        # Crucial settings: abort the build if any is missing.
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Optional settings: fall back to empty defaults with a warning.
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder.  Refuse to wipe dotted targets such as "."
    # (the old chain of and-ed `".." not in` checks was redundant: every one
    # of those strings already contains ".").
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing, either from the optional
    # page_list_item.html template or as a plain <ul> fallback.
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders with "_" in the name are skipped (draft/partial content).
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Convert every non-draft content file and wrap it in its template.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line means the first line
                # names a custom page template.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop!  fileinput with inplace=1 redirects stdout into
                # the file, so the print() below rewrites each line in place.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # BUGFIX: the old checks were `< 2` / `< 3`, so
                            # on Python 3 neither branch ran and `main` was
                            # left unbound.  Python 2 uses __import__, newer
                            # interpreters use importlib.
                            if sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            else:
                                main = importlib.import_module(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root}: "../" repeated once per directory level
                    # below the build root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile Sass/Less/Stylus/CoffeeScript assets and optionally
        # minify the results.  Files starting with "_" are partials.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # BUGFIX: this was `subprocess.call[...]`
                                # (square brackets), which raised TypeError
                                # and was silently swallowed below, so npm
                                # never actually ran.
                                subprocess.call(
                                    ["npm", "install", "-g", "stylus"])
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website.

    :param outdir: folder (relative to the working directory) to build into
    """
    print("Building your Blended files into a website!")
    # Python 2 only: force UTF-8 as the default encoding.  `reload` is not a
    # builtin and `sys.setdefaultencoding` does not exist on Python 3, so the
    # previously unguarded calls crashed there (Python 3 already defaults to
    # UTF-8 text handling).
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Default output folder for the file watcher; the `interactive` command
# overwrites this module-level global before starting the watch loop.
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ directories for changes."""

    # Kept for backward compatibility; run() builds its own path list and
    # does not read this attribute.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Schedule the handler on both watched paths and block until CTRL+C."""
        event_handler = Handler()
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for targetPath in paths:
            self.observer.schedule(event_handler, str(targetPath), recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            # Was a bare `except:`; only CTRL+C should end the watch loop --
            # other errors now propagate instead of being silently swallowed.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Rebuild the site whenever a watched file changes."""

    @staticmethod
    def on_any_event(event):
        """Rebuild into `outdir_type` on any created/modified/deleted file.

        :param event: watchdog filesystem event
        """
        global outdir_type
        if event.is_directory:
            return None
        # One rebuild path for all three event kinds -- the original had
        # three copy-pasted branches doing exactly the same thing.  The
        # printed message is identical because the verb equals event_type.
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change.

    :param outdir: folder to (re)build into on every change
    """
    print("Building your Blended files into a website!")
    global outdir_type
    outdir_type = outdir  # the watcher's Handler reads this module global
    # Python 2 only -- see `build`; unguarded these calls crash on Python 3.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Open the built index.html file in the default web browser."""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    # Guard clause: nothing to show without a built home page.
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
if __name__ == '__main__':
    # Entry point when the module is executed directly (python -m blended).
    cli()
|
def send_ftp(outdir):
    """Upload the built website to FTP.

    Reads the FTP credentials from config.py in the working directory and
    recursively uploads everything in *outdir* via placeFiles().

    :param outdir: folder containing the built site
    """
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except ImportError:
            # Was a bare `except:` -- only a missing/old config should
            # trigger this message, not unrelated errors.
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        filenameCV = os.path.join(cwd, outdir)
        try:
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        except Exception:
            # Was a bare `except:`; close the session before exiting so the
            # server connection is not left dangling.
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
"def placeFiles(ftp, path):\n \"\"\"Upload the built files to FTP\"\"\"\n for name in os.listdir(path):\n if name != \"config.py\" and name != \"config.pyc\" and name != \"templates\" and name != \"content\":\n localpath = os.path.join(path, name)\n if os.path.isfile(localpath):\n... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
# Resolve the installed Blended version for {blended_version} templating.
try:
    app_version = pkg_resources.require("blended")[0].version
    # Keep only the first three characters, i.e. "major.minor".
    app_version = app_version[:3]
except:
    # Package metadata unavailable (e.g. running from a source checkout).
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
@click.group()
def cli():
    """Blended: Static Website Generator"""
    # Root click group; all subcommands attach via @cli.command(...).
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Emit the version banner and the active project directory.
    for message in ("You are running Blended v" + app_version,
                    "The current working directory is " + cwd):
        print(message)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    print("Installing template from " + username + "/" + repo)
    # Download and unpack the repository archive into templates/.
    getunzipped(username, repo, os.path.join(cwd, "templates"))
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WordPress export file (RSS-shaped XML).
    wp = parseXML(filepath)
    # Site-level metadata from the RSS channel element.
    wname = wp.rss.channel.title.cdata
    wdesc = wp.rss.channel.description.cdata
    wlan = wp.rss.channel.language.cdata
    wurl = wp.rss.channel.link.cdata
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # One HTML content file per exported item, named after its title
    # (spaces replaced with underscores).
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    # Parse the Blogger export file (feed-shaped XML).
    blogger = parseXML(filepath)
    # Site-level metadata from the feed element.
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    # Only feed entries whose id mentions "post" are imported; other
    # entries (settings, comments, ...) are skipped.
    for entry in blogger.feed.entry:
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    url = "git+git://github.com/" + username + "/" + repo + ".git"
    try:
        # pip.main() was removed from pip's public API in pip 10; fall back
        # to invoking pip as a subprocess on modern installs.
        pip.main(['install', '-U', url])
    except AttributeError:
        subprocess.call([sys.executable, "-m", "pip", "install", "-U", url])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website by prompting for project metadata and
    generating the folder skeleton plus config.py."""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # Pick the right prompt function once instead of duplicating the whole
    # prompt block per interpreter; `raw_input` only exists on Python 2.
    if sys.version_info > (3, 0):
        ask = input
    else:
        ask = raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Upload the built files to FTP"""
    # Recursively mirror *path* into the server's current working directory.
    for name in os.listdir(path):
        # Never upload the config module or the source folders.
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists"
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                # Descend, upload the subtree, then climb back up so the
                # caller's remote working directory is restored.
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the output directory, if a previous build left one behind.
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip.

    The archive is written next to the project as
    `<website_name>-build-<date>.zip`.
    """
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except ImportError:
            # Was a bare `except:` -- only a broken/old config should match.
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Archive the build folder (the old comment wrongly said "Remove").
        build_dir = os.path.join(cwd, outdir)
        zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                               str(datetime.now().date()))
        if os.path.exists(build_dir):
            shutil.make_archive(zip_dir, 'zip', build_dir)
        else:
            print("The " + outdir +
                  "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended"""
    print("Purging the Blended files!")
    # Generated directories: templates, content and the default build output.
    # (The original had six copy-pasted exists-then-delete blocks; this loops
    # over the same paths in the same order.)
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated files: the config module, its bytecode and the init backup.
    for leftover in ("config.py", "config.pyc", "config.py.oldbak"):
        leftover_path = os.path.join(cwd, leftover)
        if os.path.exists(leftover_path):
            os.remove(leftover_path)
def convert_text(filename):
    """Convert a single content file to an HTML fragment.

    The converter is chosen by looking for a known extension substring in
    *filename* (.md, .docx, .tile, .jade, .rst, .html, .txt).  Unknown types
    yield the literal string "NULL".  The result is padded with newlines so
    fragments concatenate cleanly.

    :param filename: path of the post/page source file
    :return: converted HTML (or raw text) followed by two newlines
    """
    # NOTE(review): matching with `in` (not endswith) means e.g. "a.md.bak"
    # is treated as Markdown -- preserved for backward compatibility.
    # BUGFIX: the file handle was previously opened and never closed;
    # the `with` block guarantees it is released.
    with open(filename, "r") as text_content:
        if ".md" in filename:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif ".docx" in filename:
            # mammoth needs a binary handle, so the file is re-opened in "rb".
            with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
                result = mammoth.convert_to_html(docx_file)
                final_docx_html = result.value
            text_cont1 = "\n" + final_docx_html + "\n"
        elif ".tile" in filename:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif ".jade" in filename:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif ".rst" in filename:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif ".html" in filename or ".txt" in filename:
            # Already HTML / plain text: pass through untouched.
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the whole site into *outdir*.

    Reads config.py from the working directory, converts every file under
    content/ via convert_text(), wraps each page in header/footer (plus an
    optional per-page template chosen by a "+++++" marker on line 2 of the
    page), substitutes {placeholder} variables in-place in every built
    file, and finally copies templates/assets and compiles
    Sass/Less/Stylus/CoffeeScript with optional CSS/JS minification.

    :param outdir: name of the output folder, relative to the working dir
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        # Required settings: a missing one aborts the build.
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Optional settings fall back to empty/disabled defaults.
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    # NOTE(review): these handles are consumed once below and re-opened per
    # page; they are never closed explicitly.
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing: a default <ul> when no
    # page_list_item.html template exists, otherwise one rendered item per
    # content file.
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Skip underscore-marked directories (drafts/partials).
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                # Derive day/month/year strings from the mtime's ctime text.
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    # home_page_list may be the legacy string "yes" or a boolean.
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Build every content file into a page: header + templated body + footer.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A page may name its own template: line 1 is the template
                # name and line 2 a "+++++" separator; both are stripped.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop!  fileinput with inplace=1 redirects print()
                # back into the file, so each printed line rewrites the
                # built page in place.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # NOTE(review): these version checks look
                            # inverted — `[0] < 2` is Python 1.x, and
                            # neither branch binds `main` on Python 3;
                            # confirm intended behavior.
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes "../" repeated once per level
                    # below the output root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
    # Compile/minify copied assets (walk yields nothing when no assets
    # folder was copied).
    for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
        for file in files:
            if not file.startswith("_"):
                if (file.endswith(".sass")) or (file.endswith(".scss")):
                    sass_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if sass_text != "":
                        text_file.write(sass.compile(string=sass_text))
                    else:
                        print(file + " is empty! Not compiling Sass.")
                    text_file.close()
                if file.endswith(".less"):
                    less_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if less_text != "":
                        text_file.write(lesscpy.compile(StringIO(less_text)))
                    else:
                        print(file + " is empty! Not compiling Less.")
                    text_file.close()
                if file.endswith(".styl"):
                    try:
                        styl_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if styl_text != "":
                            text_file.write(Stylus().compile(styl_text))
                        else:
                            print(file + " is empty! Not compiling Styl.")
                        text_file.close()
                    except:
                        print("Not able to build with Stylus! Is it installed?")
                        try:
                            # NOTE(review): subscripting subprocess.call is
                            # a bug (should be a call with parentheses); it
                            # raises TypeError and always falls into the
                            # except below.
                            subprocess.call["npm", "install", "-g", "stylus"]
                        except:
                            print("NPM (NodeJS) not working. Is it installed?")
                if file.endswith(".coffee"):
                    coffee_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                    if coffee_text != "":
                        text_file.write(coffeescript.compile(coffee_text))
                    else:
                        print(file + " is empty! Not compiling CoffeeScript.")
                    text_file.close()
                if minify_css:
                    if file.endswith(".css"):
                        css_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if css_text != "":
                            text_file.write(cssmin(css_text))
                        text_file.close()
                if minify_js:
                    if file.endswith(".js"):
                        js_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if js_text != "":
                            text_file.write(jsmin(js_text))
                        text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # Fix: reload() and sys.setdefaultencoding() only exist on Python 2;
    # calling them unconditionally crashed this command on Python 3, which
    # the rest of the file supports (six / version checks).
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Module-level default output directory for the file watcher; the
# `interactive` command overwrites it before starting the observer and
# Handler.on_any_event reads it on every rebuild.
outdir_type = "build"
class Watcher:
    """Watch the specified dirs for changes"""
    # NOTE(review): kept for backward compatibility; run() watches both
    # content/ and templates/ and does not read this attribute.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes: schedule both watched trees on one
        observer and poll until the user interrupts with Ctrl-C."""
        event_handler = Handler()
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for watch_path in paths:
            # One Observer instance can watch several trees.
            self.observer.schedule(event_handler, str(watch_path), recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            # Fix: was a bare `except:` that also swallowed genuine errors;
            # only Ctrl-C should stop the watcher silently.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""

    @staticmethod
    def on_any_event(event):
        """Rebuild the whole site when a watched file changes."""
        global outdir_type
        if event.is_directory:
            return None
        action = event.event_type
        if action in ('created', 'modified', 'deleted'):
            # Any relevant change triggers a full rebuild into the current
            # output directory, then reports what happened.
            build_files(outdir_type)
            print("%s %s" % (event.src_path, action))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Share the chosen output folder with Handler.on_any_event.
    global outdir_type
    outdir_type = outdir
    # Fix: reload()/sys.setdefaultencoding() are Python 2-only; guard them
    # so this command also runs on Python 3.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    # Hand the page to the default browser via a file:// URL.
    webbrowser.open('file://' + index_path)
if __name__ == '__main__':
    # Entry point when the module is executed directly: hand control to
    # the click command group.
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | clean_built | python | def clean_built(outdir):
print("Removing the built files!")
# Remove the build folder
build_dir = os.path.join(cwd, outdir)
if os.path.exists(build_dir):
shutil.rmtree(build_dir) | Removes all built files | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L244-L251 | null | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()  # all project paths below are resolved relative to this

try:
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]  # keep only the "major.minor" prefix
except:
    # Not installed as a package (e.g. running from a source checkout).
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
@click.group()
def cli():
    """Blended: Static Website Generator"""
    # Click group: subcommands are attached below via @cli.command(...).
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Report the tool version and the directory commands operate in.
    for message in ("You are running Blended v" + app_version,
                    "The current working directory is " + cwd):
        print(message)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    print("Installing template from " + username + "/" + repo)
    # Templates are unpacked straight into the project's templates/ folder.
    destination = os.path.join(cwd, "templates")
    getunzipped(username, repo, destination)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WXR export and pull the site metadata from its channel.
    export = parseXML(filepath)
    channel = export.rss.channel
    site_name = channel.title.cdata
    site_desc = channel.description.cdata
    site_lang = channel.language.cdata
    site_url = channel.link.cdata
    author = channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file from the imported metadata.
    createConfig(app_version=app_version, wname=site_name,
                 wdesc=site_desc, wlan=site_lang, wurl=site_url, aname=author)
    # Each exported item becomes one HTML page under content/.
    for item in channel.item:
        page_name = item.title.cdata.replace(" ", "_") + ".html"
        with open(os.path.join(cwd, "content", page_name), 'w') as page:
            page.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    export = parseXML(filepath)
    site_name = export.feed.title.cdata
    author = export.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file from the imported metadata.
    createConfig(app_version=app_version, wname=site_name, aname=author)
    # Blogger exports mix posts, comments and settings; only entries whose
    # id mentions "post" are actual posts.
    for entry in export.feed.entry:
        if "post" in entry.id.cdata:
            page_name = entry.title.cdata.replace(" ", "_") + ".html"
            with open(os.path.join(cwd, "content", page_name), 'w') as page:
                page.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    package = "git+git://github.com/" + username + "/" + repo + ".git"
    # Fix: `pip.main` was removed in pip 10. Use it when present (older
    # pip), otherwise shell out to `python -m pip`, the supported way to
    # invoke pip programmatically.
    if hasattr(pip, "main"):
        pip.main(['install', '-U', package])
    else:
        subprocess.call([sys.executable, "-m", "pip", "install", "-U", package])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # Pick the prompt builtin that matches the running interpreter
    # (input on Python 3, raw_input on Python 2).
    if (sys.version_info > (3, 0)):
        ask = input
    else:
        ask = raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Upload the built files to FTP.

    Recursively mirrors *path* onto the server: regular files are STORed,
    directories are created (550 "already exists" is ignored) and descended
    into. Project sources (config.py, config.pyc, templates/, content/) are
    skipped so only the built site is uploaded.

    :param ftp: a connected, logged-in ftplib.FTP instance
    :param path: local directory to upload
    """
    for name in os.listdir(path):
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                # Fix: close the local file after upload (was left open).
                with open(localpath, 'rb') as upload:
                    ftp.storbinary('STOR ' + name, upload)
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists"
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        # Connection settings live in the project's config.py.
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    server = ftp_server
    username = ftp_username
    password = ftp_password
    port = ftp_port
    ftp = FTP()
    ftp.connect(server, port)
    ftp.login(username, password)
    filenameCV = os.path.join(cwd, outdir)  # local directory to mirror
    try:
        # Change into the configured remote directory, then mirror the
        # build output with the recursive placeFiles() helper.
        ftp.cwd(ftp_upload_path)
        placeFiles(ftp, filenameCV)
    except:
        # NOTE(review): this bare except hides the real upload error behind
        # a generic message — consider logging the exception.
        ftp.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
    ftp.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    # The site name (used in the archive's filename) comes from config.py.
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Archive name: <Site_Name>-build-<YYYY-MM-DD>.zip in the project root.
    build_dir = os.path.join(cwd, outdir)
    archive_base = website_name.replace(" ", "_") + "-build-" + str(datetime.now().date())
    zip_dir = os.path.join(cwd, archive_base)
    if not os.path.exists(build_dir):
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
    else:
        shutil.make_archive(zip_dir, 'zip', build_dir)
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended.

    Deletes the templates/, content/ and build/ directories plus the
    generated config files (config.py, config.pyc, config.py.oldbak) from
    the working directory; anything that does not exist is skipped.
    """
    print("Purging the Blended files!")
    # Directories created by `init`/`build`. (Was three copy-pasted blocks,
    # one of which carried a wrong comment.)
    for dirname in ("templates", "content", "build"):
        target = os.path.join(cwd, dirname)
        if os.path.exists(target):
            shutil.rmtree(target)
    # Generated configuration files, incl. compiled and backup copies.
    for cfgname in ("config.py", "config.pyc", "config.py.oldbak"):
        target = os.path.join(cwd, cfgname)
        if os.path.exists(target):
            os.remove(target)
def convert_text(filename):
    """Convert a single content file to an HTML fragment.

    Dispatches on the (substring-matched) file extension to the matching
    converter: Markdown, docx (mammoth), Textile, Jade, reStructuredText,
    or raw HTML/plain text. Unknown types print a warning and yield "NULL".

    :param filename: path to the content file
    :return: the converted HTML followed by two trailing newlines
    """
    # NOTE(review): extension checks are substring matches (".md" in
    # filename), so e.g. "notes.md.bak" is still treated as Markdown —
    # kept as-is for backward compatibility.
    with open(filename, "r") as text_content:
        if ".md" in filename:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif ".docx" in filename:
            # mammoth needs the file opened in binary mode; os.path.join()
            # simply returns `filename` when it is already absolute.
            with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
                result = mammoth.convert_to_html(docx_file)
                text_cont1 = "\n" + result.value + "\n"
        elif ".tile" in filename:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif ".jade" in filename:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif ".rst" in filename:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif ".html" in filename or ".txt" in filename:
            # Already HTML / plain text: pass the content through unchanged.
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    # Fix: the file handle used to be left open; `with` closes it even if
    # a converter raises.
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the site: render content/ through templates/ into `outdir`.

    Loads config.py from the working directory, builds an HTML page list,
    converts every content file, wraps it in header/footer (and optional
    per-page) templates, substitutes {placeholder} variables in place, and
    finally copies and compiles the template assets.
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        # Crucial values: the build cannot proceed without these.
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Optional values: fall back to empty defaults when missing.
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder (refuse dot-containing outdir names)
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        # No custom list-item template: emit a default <ul> listing.
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        # Custom template: repeat it per content file with {placeholders}.
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Skip directories whose name contains an underscore.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                # Re-open header/footer so read() starts at the top again.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on line 2 means line 1 names the template.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop! fileinput with inplace=1 rewrites the file:
                # every print() replaces the original line in place.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # FIX: was `sys.version_info[0] < 2` (Python 1!),
                            # which left `main` unbound on Python 3.
                            if sys.version_info[0] >= 3:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} -> "../" repeated once per level below outdir.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile/minify the copied assets in place.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # FIX: was `subprocess.call[...]` (subscript),
                                # which raised TypeError instead of running npm.
                                subprocess.call(
                                    ["npm", "install", "-g", "stylus"])
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # Python 2 UTF-8 workaround: restore setdefaultencoding and force utf8.
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    done_msg = ("The files are built! You can find them in the " + outdir +
                "/ directory. Run the view command to see what you have created in a web browser.")
    print(done_msg)
# Default output directory for watcher-triggered rebuilds; the
# `interactive` command overwrites this before the observer starts.
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ directories and rebuild on change."""

    # NOTE(review): not read by run(), which builds its own path list; kept
    # only so any external reader of this attribute keeps working.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes"""
        event_handler = Handler()
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for i in paths:
            targetPath = str(i)
            # One observer watches both trees recursively.
            self.observer.schedule(event_handler, targetPath, recursive=True)
        # (FIX: dropped the `threads` list the original appended to but
        # never read.)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except:
            # Bare except is intentional: CTRL+C (KeyboardInterrupt) lands
            # here and shuts the observer down cleanly.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""

    @staticmethod
    def on_any_event(event):
        global outdir_type
        if event.is_directory:
            return None
        # Any create/modify/delete of a file triggers a full rebuild, then
        # reports the event as "<path> <event>" exactly as before.
        if event.event_type in ("created", "modified", "deleted"):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Remember the target folder for the watcher's rebuild callbacks.
    global outdir_type
    outdir_type = outdir
    # Python 2 UTF-8 workaround (same as the plain build command).
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    Watcher().run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    # Guard clause: bail out with the hint message when nothing was built.
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
# Entry point when executed directly (e.g. `python -m blended`).
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | zip_built | python

def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Remove the build folder
    build_dir = os.path.join(cwd, outdir)
    zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                           str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(zip_dir, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")

Packages the build folder into a zip | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L256-L280 | null

"""This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()

# Resolve the installed package version; keep only the first three
# characters (e.g. "1.4").
try:
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]
except:
    # No distribution metadata (e.g. running from a source checkout).
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root click command group; every subcommand below attaches to this.
@click.group()
def cli():
    """Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Same two lines of output, composed via %-formatting.
    print("You are running Blended v%s" % app_version)
    print("The current working directory is %s" % cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Downloads and unpacks a Blended template from a GitHub repository."""
    print("Installing template from " + username + "/" + repo)
    # Templates always land in the project's templates/ folder.
    templates_dir = os.path.join(cwd, "templates")
    getunzipped(username, repo, templates_dir)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    wp = parseXML(filepath)
    # Site metadata from the export's <rss><channel> element.
    wname = wp.rss.channel.title.cdata
    wdesc = wp.rss.channel.description.cdata
    wlan = wp.rss.channel.language.cdata
    wurl = wp.rss.channel.link.cdata
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # One content file per exported item; spaces in titles become
    # underscores so the filename is filesystem-safe.
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    blogger = parseXML(filepath)
    # Site title and author from the feed header (presumably an Atom
    # export -- TODO confirm against a real Blogger dump).
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    # Only entries whose id contains "post" become content files (skips
    # comments/settings entries in the same feed).
    for entry in blogger.feed.entry:
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # NOTE(review): `pip.main` was removed in pip 10, and GitHub disabled
    # the unauthenticated git:// protocol in 2022 -- this only works with
    # very old tooling. Consider `subprocess` + `python -m pip` and an
    # https:// URL. TODO confirm before changing behavior.
    pip.main(['install', '-U', "git+git://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # Pick the prompt function for the running interpreter; the else arm
    # is only evaluated on Python 2, where raw_input exists.
    ask = input if (sys.version_info > (3, 0)) else raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Upload the built files to FTP"""
    for name in os.listdir(path):
        # Never upload the local config or the source folders.
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists"
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                # Recurse into the subdirectory, then climb back up so the
                # caller's remote working directory is restored.
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        # NOTE(review): plain FTP -- credentials and content travel
        # unencrypted.
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        filenameCV = os.path.join(cwd, outdir)
        try:
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        except:
            # Any failure aborts the upload; quit the session before exiting.
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the whole output directory tree when present.
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    # FIX: the 'zip' decorators were left dangling on top of purge()'s
    # decorator (the function body had been stripped out), corrupting the
    # command registration; the zip_built body is reinstated here.
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    # Archive is written as <website_name>-build-<date>.zip next to cwd.
    build_dir = os.path.join(cwd, outdir)
    zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                           str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(zip_dir, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")


@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended"""
    print("Purging the Blended files!")
    # Remove the templates folder
    templ_dir = os.path.join(cwd, "templates")
    if os.path.exists(templ_dir):
        shutil.rmtree(templ_dir)
    # Remove the content folder
    cont_dir = os.path.join(cwd, "content")
    if os.path.exists(cont_dir):
        shutil.rmtree(cont_dir)
    # Remove the build folder
    build_dir = os.path.join(cwd, "build")
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)
    # Remove config.py
    config_file_dir = os.path.join(cwd, "config.py")
    if os.path.exists(config_file_dir):
        os.remove(config_file_dir)
    # Remove config.pyc
    config2_file_dir = os.path.join(cwd, "config.pyc")
    if os.path.exists(config2_file_dir):
        os.remove(config2_file_dir)
    # Remove the config.py.oldbak backup left by re-running init
    config3_file_dir = os.path.join(cwd, "config.py.oldbak")
    if os.path.exists(config3_file_dir):
        os.remove(config3_file_dir)
def convert_text(filename):
    """Convert the post/page content using the converters.

    Dispatches on the extension found in *filename* (markdown, docx,
    textile, jade, rst, html, txt) and returns the rendered HTML/text
    with a trailing blank line; unknown types return "NULL\n\n".
    """
    # Open once; the try/finally guarantees the handle is closed even when
    # a converter raises (the original version leaked it on every call).
    text_content = open(filename, "r")
    try:
        if ".md" in filename:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
        elif ".docx" in filename:
            # NOTE(review): callers pass a full path, and os.path.join drops
            # the cwd/"content" prefix when the last part is absolute, so
            # this works -- but the join looks accidental. TODO confirm.
            with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
                result = mammoth.convert_to_html(docx_file)
                final_docx_html = result.value
                text_cont1 = "\n" + final_docx_html + "\n"
        elif ".tile" in filename:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
        elif ".jade" in filename:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
        elif ".rst" in filename:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
        elif ".html" in filename:
            text_cont1 = text_content.read()
        elif ".txt" in filename:
            text_cont1 = text_content.read()
        else:
            print(filename + " is not a valid file type!")
            text_cont1 = "NULL"
    finally:
        text_content.close()
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the files!

    Renders every file under content/ through the templates in templates/
    and writes the finished site into <outdir>/: converts each source file
    to HTML, wraps it in the header/footer/content templates, substitutes
    {placeholder} variables in place, then copies and compiles the asset
    folder (Sass/Less/Stylus/CoffeeScript, optional CSS/JS minification).

    :param outdir: name of the output folder, relative to the project root.
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the project root; make it importable as a module.
        sys.path.insert(0, cwd)
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            # Optional settings fall back to harmless defaults.
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")

    # Create the build folder
    build_dir = os.path.join(cwd, outdir)
    # NOTE(review): this guard refuses any outdir containing a dot (the
    # "..", "...", etc. clauses are redundant with the first check). It
    # protects against wiping a parent directory via a relative path, but
    # also silently skips recreating folders with names like "v1.0".
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)

    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")

    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")

    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")

    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        # No custom list-item template: fall back to a plain <ul> listing.
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        # Custom template present: fill its placeholders once per content file.
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Directories with "_" in the name are excluded from the listing.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                # Round-trip through ctime/strptime to slice out date parts.
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)

    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()

    # Render every non-draft content file into the output directory.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                # Re-open header/footer each time: earlier reads exhausted
                # the previous handles.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line selects a custom page
                # template named on the first line; both lines are stripped.
                # NOTE(review): splitlines()[1] raises IndexError for content
                # shorter than two lines -- confirm inputs always qualify.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()

    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()

    # Never rewrite placeholders inside assets/ or templates/.
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())

    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop!  fileinput with inplace=1 redirects print() into
                # the file itself, so every printed line replaces the
                # corresponding original line.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # NOTE(review): the first branch can never run
                            # (version_info[0] < 2), so plugins are only
                            # imported on Python 2 via __import__ -- confirm
                            # whether the comparisons were meant to be > 2 / > 3.
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        # Substitute the first {nav...} placeholder with the
                        # matching template read into `navs` above.
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes one "../" per directory level
                    # below the output root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()

    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
    # Compile/minify assets in place inside the copied folder (os.walk on a
    # missing directory simply yields nothing).
    for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
        for file in files:
            if not file.startswith("_"):
                if (file.endswith(".sass")) or (file.endswith(".scss")):
                    sass_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if sass_text != "":
                        text_file.write(sass.compile(string=sass_text))
                    else:
                        print(file + " is empty! Not compiling Sass.")
                    text_file.close()
                if file.endswith(".less"):
                    less_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(
                        root, file[:-4] + "css"), "w")
                    if less_text != "":
                        text_file.write(lesscpy.compile(StringIO(less_text)))
                    else:
                        print(file + " is empty! Not compiling Less.")
                    text_file.close()
                if file.endswith(".styl"):
                    try:
                        styl_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if styl_text != "":
                            text_file.write(Stylus().compile(styl_text))
                        else:
                            print(file + " is empty! Not compiling Styl.")
                        text_file.close()
                    except:
                        print("Not able to build with Stylus! Is it installed?")
                        try:
                            # NOTE(review): square brackets index
                            # subprocess.call instead of calling it; this line
                            # raises TypeError if ever reached -- should be
                            # subprocess.call([...]).
                            subprocess.call["npm", "install", "-g", "stylus"]
                        except:
                            print("NPM (NodeJS) not working. Is it installed?")
                if file.endswith(".coffee"):
                    coffee_text = open(os.path.join(root, file)).read()
                    text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                    if coffee_text != "":
                        text_file.write(coffeescript.compile(coffee_text))
                    else:
                        print(file + " is empty! Not compiling CoffeeScript.")
                    text_file.close()
                if minify_css:
                    if file.endswith(".css"):
                        css_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if css_text != "":
                            text_file.write(cssmin(css_text))
                        text_file.close()
                if minify_js:
                    if file.endswith(".js"):
                        js_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file), "w")
                        if js_text != "":
                            text_file.write(jsmin(js_text))
                        text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    # CLI entry point: delegates all real work to build_files().
    print("Building your Blended files into a website!")
    # reload()/sys.setdefaultencoding() only exist on Python 2; without this
    # guard the command crashes with NameError on Python 3.
    try:
        reload(sys)
        sys.setdefaultencoding('utf8')
    except NameError:
        pass  # Python 3: strings are unicode by default, nothing to do.
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Module-level default output directory used by the file watcher; the
# `interactive` command overwrites it with the user's --outdir choice.
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ directories and block until Ctrl+C.

    Used by the `interactive` command: a single watchdog Observer is
    scheduled on both directories with a Handler that rebuilds the site.
    """

    # NOTE(review): kept for backward compatibility -- nothing in this class
    # reads it any more (both watch paths are built inside run()).
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Schedule handlers on both watch paths and sleep until interrupted."""
        event_handler = Handler()
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for watch_path in paths:
            self.observer.schedule(event_handler, str(watch_path), recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            # Previously a bare `except:` -- narrowed to Ctrl+C so genuine
            # errors are no longer silently swallowed.
            self.observer.stop()
            print("\nObserver stopped.")
            self.observer.join()
class Handler(FileSystemEventHandler):
    """Watchdog handler that rebuilds the site whenever a watched file changes."""

    @staticmethod
    def on_any_event(event):
        """Rebuild into `outdir_type` on file creation, modification or deletion.

        Directory events are ignored; other event types (e.g. moves) fall
        through without action, matching the original behavior.
        """
        global outdir_type
        if event.is_directory:
            return None
        # The original three branches were identical copy-paste (and the
        # 'deleted' branch carried a wrong "modified" comment); collapse
        # them -- the printed output is byte-identical.
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Publish the chosen output directory for the watchdog Handler, which
    # reads the module-level `outdir_type` on every rebuild.
    global outdir_type
    outdir_type = outdir
    # reload()/sys.setdefaultencoding() only exist on Python 2; without this
    # guard the command crashes with NameError on Python 3.
    try:
        reload(sys)
        sys.setdefaultencoding('utf8')
    except NameError:
        pass  # Python 3: strings are unicode by default, nothing to do.
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    # Resolve the generated home page inside the chosen build directory.
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        # Nothing to show: either the site was never built or the home page
        # listing was disabled in config.py.
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
# Script entry point: dispatch to the Click command group when this module
# is executed directly.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | purge | python | def purge():
print("Purging the Blended files!")
# Remove the templates folder
templ_dir = os.path.join(cwd, "templates")
if os.path.exists(templ_dir):
shutil.rmtree(templ_dir)
# Remove the content folder
cont_dir = os.path.join(cwd, "content")
if os.path.exists(cont_dir):
shutil.rmtree(cont_dir)
# Remove the build folder
build_dir = os.path.join(cwd, "build")
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
# Remove config.py
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
os.remove(config_file_dir)
# Remove config.pyc
config2_file_dir = os.path.join(cwd, "config.pyc")
if os.path.exists(config2_file_dir):
os.remove(config2_file_dir)
# Remove config.py
config3_file_dir = os.path.join(cwd, "config.py.oldbak")
if os.path.exists(config3_file_dir):
os.remove(config3_file_dir) | Removes all files generated by Blended | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L284-L316 | null | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()

try:
    app_version = pkg_resources.require("blended")[0].version
    # Only the first three characters (MAJOR.MINOR) are kept for display and
    # for embedding into generated files.
    app_version = app_version[:3]
except:
    # Running from a source checkout without an installed distribution; fall
    # back to a sentinel so generated files still have something to embed.
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root Click group: every `blended <subcommand>` below is registered on this.
# (The docstring doubles as the CLI help text, so it is left untouched.)
@click.group()
def cli():
    """Blended: Static Website Generator"""
# NOTE(review): registered as `blended info` although the function is named
# `version` -- the CLI name is what users see; renaming would be cosmetic.
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Report the running Blended version and the active project directory.
    print("You are running Blended v" + app_version)
    print("The current working directory is " + cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    print("Installing template from " + username + "/" + repo)
    # Download the <username>/<repo> archive and unpack it into ./templates
    # (getunzipped is a project helper imported at the top of this file).
    dpath = os.path.join(cwd, "templates")
    getunzipped(username, repo, dpath)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the export and read the site-wide metadata out of <rss><channel>.
    wp = parseXML(filepath)
    wname = wp.rss.channel.title.cdata
    wdesc = wp.rss.channel.description.cdata
    wlan = wp.rss.channel.language.cdata
    wurl = wp.rss.channel.link.cdata
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # Write one content/<Title>.html file per exported <item>, using the
    # item's HTML body verbatim.
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    # Parse the export; site title and author come from the feed header.
    blogger = parseXML(filepath)
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    # Only entries whose id mentions "post" are written out as content files.
    for entry in blogger.feed.entry:
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # pip-install the plugin straight from its GitHub repository.
    # NOTE(review): `pip.main` was removed from pip's public API in pip 10,
    # and the git:// protocol is disabled on GitHub -- confirm and consider
    # invoking pip via subprocess with an https URL instead.
    pip.main(['install', '-U', "git+git://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    # checkConfig() is a project helper imported above; presumably it deals
    # with a pre-existing config.py before we prompt -- confirm its contract.
    checkConfig()
    # input() on Python 3, raw_input() on Python 2.
    if (sys.version_info > (3, 0)):
        wname = input("Website Name: ")
        wdesc = input("Website Description: ")
        wlan = input("Website Language: ")
        wlic = input("Website License: ")
        aname = input("Author(s) Name(s): ")
    else:
        wname = raw_input("Website Name: ")
        wdesc = raw_input("Website Description: ")
        wlan = raw_input("Website Language: ")
        wlic = raw_input("Website License: ")
        aname = raw_input("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload the contents of *path* to the FTP server.

    Configuration and source entries (config.py, config.pyc, templates/,
    content/) are skipped at every level so only built output is published.
    Directories are created on the server if missing and entered/left with
    CWD so the remote tree mirrors the local one.

    :param ftp: a connected, logged-in ``ftplib.FTP``-like object.
    :param path: local directory whose entries are uploaded into the
        server's current working directory.
    """
    for name in os.listdir(path):
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                # Close the local file promptly instead of leaking the handle
                # (previously the open() result was never closed).
                with open(localpath, 'rb') as upload_fh:
                    ftp.storbinary('STOR ' + name, upload_fh)
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists"
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # FTP credentials live in the project's config.py.
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        filenameCV = os.path.join(cwd, outdir)
        try:
            # Change into the configured remote directory, then mirror the
            # local build tree with the recursive helper.
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        except:
            # NOTE(review): this broad except also hides auth/permission
            # errors behind one generic message -- confirm before narrowing.
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the whole output directory, if one exists; silently a no-op
    # when the project has never been built.
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # The website name (used in the archive name) comes from config.py.
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        build_dir = os.path.join(cwd, outdir)
        # Archive name: <site>-build-<date>; make_archive appends ".zip".
        zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                               str(datetime.now().date()))
        if os.path.exists(build_dir):
            shutil.make_archive(zip_dir, 'zip', build_dir)
        else:
            print("The " + outdir +
                  "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def convert_text(filename):
    """Convert a post/page source file into an HTML fragment.

    The converter is chosen by substring-matching the file name, so callers
    are expected to use conventional extensions (.md, .docx, .tile, .jade,
    .rst, .html, .txt).

    :param filename: path to the content file (callers pass a path built
        from the ``content`` directory).
    :return: the converted HTML followed by two newlines, or ``"NULL\\n\\n"``
        for unsupported file types.
    """
    # All handles are opened with `with` so they are closed promptly;
    # previously every branch leaked an open file object.
    if ".md" in filename:
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
    elif ".docx" in filename:
        # NOTE(review): os.path.join() discards the cwd/content prefix when
        # `filename` is already absolute (the usual case, since callers join
        # against os.getcwd()) -- confirm before passing relative paths here.
        with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
            result = mammoth.convert_to_html(docx_file)
        text_cont1 = "\n" + result.value + "\n"
    elif ".tile" in filename:
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
    elif ".jade" in filename:
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
    elif ".rst" in filename:
        with open(filename, "r") as text_content:
            text_cont1 = "\n" + \
                publish_parts(text_content.read(), writer_name='html')[
                    'html_body'] + "\n"
    elif ".html" in filename:
        with open(filename, "r") as text_content:
            text_cont1 = text_content.read()
    elif ".txt" in filename:
        with open(filename, "r") as text_content:
            text_cont1 = text_content.read()
    else:
        # Unsupported extension: warn and emit a sentinel body.
        print(filename + " is not a valid file type!")
        text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the full website into *outdir*.

    Pipeline: read config.py from the working directory, render a page
    list, convert every file under content/ with the templates in
    templates/, substitute the ``{placeholder}`` variables in place, and
    finally copy/compile the asset folder (Sass/Less/Stylus/CoffeeScript,
    optional CSS/JS minification).

    :param outdir: name of the output directory, relative to the cwd
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the user's working directory, not on sys.path.
        sys.path.insert(0, cwd)
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            # Optional settings: fall back to harmless defaults when absent.
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder.  The dot checks guard against wiping the
    # current/parent directory; NOTE(review): every check after the first
    # is redundant ("." not in outdir already excludes "..", etc.).
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        # No page_list_item.html template: fall back to a plain <ul> list
        # of links with modification timestamps.
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        # Template found: expand page_list_item.html once per content file,
        # substituting {path}/{name}/{date}/{content}/... placeholders.
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Skip directories whose name contains "_" (treated as private).
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                # Slice the ISO form "YYYY-MM-DD HH:MM:SS" for day/year/month.
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    # home_page_list may be the legacy string "yes" or a boolean.
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            # Files starting with "_" are drafts/partials and are skipped.
            if not filename.startswith("_"):
                # Re-open header/footer each iteration: the handles opened
                # above were exhausted by the home-page write.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on line 2 selects the custom template
                # named on line 1; both lines are stripped from the content.
                # NOTE(review): single-line content would raise IndexError
                # here — confirm content files always have >= 2 lines.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop!  fileinput with inplace=1 redirects stdout into
                # the file being read, so the print() at the bottom rewrites
                # each line after placeholder substitution.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # NOTE(review): this version test looks inverted —
                            # on Python 3 neither branch binds `main`, so the
                            # next line would raise NameError. Confirm intent.
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # Depth below the build root decides how many "../" the
                    # {relative_root} placeholder expands to.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile preprocessor sources found among the copied assets.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # NOTE(review): square brackets index the
                                # function instead of calling it — this always
                                # raises TypeError, which the except swallows.
                                subprocess.call["npm", "install", "-g", "stylus"]
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # Fix: reload() is not a builtin and sys.setdefaultencoding does not
    # exist on Python 3, so the unconditional calls crashed there.  The
    # UTF-8 default-codec hack is only needed (and only possible) on
    # Python 2.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ directories for file changes.

    Wraps a single watchdog ``Observer`` that dispatches events to
    :class:`Handler`, which rebuilds the site on every change.
    """

    # Kept for backward compatibility with external users; run() watches
    # both content/ and templates/ explicitly.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Block forever, rebuilding on changes, until interrupted."""
        event_handler = Handler()
        # Fix: dropped the unused `threads` list the original built up.
        for path in (os.path.join(cwd, "content"),
                     os.path.join(cwd, "templates")):
            self.observer.schedule(event_handler, str(path), recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            # Fix: only stop cleanly on Ctrl+C; the original bare
            # ``except:`` silently swallowed every error as well.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Rebuild the site whenever a watched file is created, modified or deleted."""

    @staticmethod
    def on_any_event(event):
        """watchdog callback: trigger a full rebuild on any file-level event.

        :param event: a watchdog event; directory events are ignored
        """
        if event.is_directory:
            return None
        # Fix (DRY): the original repeated an identical rebuild+print body
        # three times; all three events take exactly the same action, and
        # the printed message "<path> <event_type>" is byte-identical.
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    global outdir_type
    # Remember the output dir for Handler's rebuilds.
    outdir_type = outdir
    # Fix: same Python 3 crash as in build() — reload()/setdefaultencoding
    # only exist on Python 2, so guard the hack.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    # Resolve the home page inside the chosen build directory.
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
# Allow running this module directly, in addition to the installed
# console-script entry point.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | convert_text | python | def convert_text(filename):
text_content = open(filename, "r")
if ".md" in filename:
text_cont1 = "\n" + markdown.markdown(text_content.read()) + "\n"
elif ".docx" in filename:
with open(os.path.join(cwd, "content", filename), "rb") as docx_file:
result = mammoth.convert_to_html(docx_file)
final_docx_html = result.value
text_cont1 = "\n" + final_docx_html + "\n"
elif ".tile" in filename:
text_cont1 = "\n" + textile.textile(text_content.read()) + "\n"
elif ".jade" in filename:
text_cont1 = "\n" + pyjade.simple_convert(text_content.read()) + "\n"
elif ".rst" in filename:
text_cont1 = "\n" + \
publish_parts(text_content.read(), writer_name='html')[
'html_body'] + "\n"
elif ".html" in filename:
text_cont1 = text_content.read()
elif ".txt" in filename:
text_cont1 = text_content.read()
else:
print(filename + " is not a valid file type!")
text_cont1 = "NULL"
return text_cont1 + "\n\n" | Convert the post/page content using the converters | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L319-L345 | null | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
try:
    # Discover our own installed version via setuptools metadata; only the
    # first three characters (MAJOR.MINOR) are shown to users.
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]
except:
    # Not installed as a package (e.g. run from a source checkout).
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
# Root command group: every `blended <subcommand>` below attaches to this
# via the @cli.command decorators.  The docstring doubles as --help text.
@click.group()
def cli():
    """Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Report the installed version and where commands will operate.
    for message in ("You are running Blended v" + app_version,
                    "The current working directory is " + cwd):
        print(message)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    # Fetch <username>/<repo> as a zipball and unpack it into templates/.
    print("Installing template from " + username + "/" + repo)
    getunzipped(username, repo, os.path.join(cwd, "templates"))
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Site-wide metadata comes from the WXR <channel> element.
    channel = parseXML(filepath).rss.channel
    site_name = channel.title.cdata
    site_desc = channel.description.cdata
    site_lang = channel.language.cdata
    site_url = channel.link.cdata
    author = channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_name,
                 wdesc=site_desc, wlan=site_lang, wurl=site_url, aname=author)
    # Every <item> becomes one HTML content file named after its title.
    for item in channel.item:
        target = os.path.join(cwd, "content",
                              item.title.cdata.replace(" ", "_") + ".html")
        with open(target, 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    # The Atom feed header supplies the site name and author.
    feed = parseXML(filepath).feed
    site_name = feed.title.cdata
    author = feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_name, aname=author)
    # Blogger exports mix posts, comments and settings entries; only the
    # entries whose id mentions "post" become content files.
    for entry in feed.entry:
        if "post" in entry.id.cdata:
            target = os.path.join(cwd, "content",
                                  entry.title.cdata.replace(" ", "_") + ".html")
            with open(target, 'w') as outfile:
                outfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub"""
    print("Installing plugin from " + username + "/" + repo)
    # Delegate to pip so the plugin lands on the import path.
    plugin_url = "git+git://github.com/" + username + "/" + repo + ".git"
    pip.main(['install', '-U', plugin_url])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # Fix (DRY): raw_input() was renamed to input() in Python 3; pick the
    # right prompt function once instead of duplicating all five prompts
    # in two version-specific branches.
    if sys.version_info[0] >= 3:
        ask = input
    else:
        ask = raw_input
    wname = ask("Website Name: ")
    wdesc = ask("Website Description: ")
    wlan = ask("Website Language: ")
    wlic = ask("Website License: ")
    aname = ask("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload the contents of *path* over an open FTP session.

    Files are sent with STOR; directories are created remotely (errors from
    already-existing directories are ignored), recursed into, and then the
    remote working directory is restored with "CWD ..".  Blended's own
    working files (config.py/.pyc, templates/, content/) are never uploaded.

    :param ftp: a connected and logged-in ftplib.FTP instance
    :param path: local directory whose contents are mirrored
    """
    for name in os.listdir(path):
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists" (FTP code 550);
                # any other permanent error is re-raised
                except error_perm as e:
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                # Recurse into the subdirectory, then step back up so the
                # caller's remote cwd is unchanged.
                placeFiles(ftp, localpath)
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP"""
    # Reads ftp_server/ftp_username/ftp_password/ftp_port/ftp_upload_path
    # from config.py and mirrors the *outdir* tree via placeFiles().
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the working directory, not on sys.path.
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
    server = ftp_server
    username = ftp_username
    password = ftp_password
    port = ftp_port
    ftp = FTP()
    ftp.connect(server, port)
    ftp.login(username, password)
    filenameCV = os.path.join(cwd, outdir)
    try:
        ftp.cwd(ftp_upload_path)
        placeFiles(ftp, filenameCV)
    except:
        # NOTE(review): this broad except hides the actual failure (auth,
        # missing remote dir, transfer error) behind one generic message.
        ftp.quit()
        sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
    ftp.quit()
    print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Removes all built files"""
    print("Removing the built files!")
    # Delete the output directory if a previous build produced one.
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    # The site name from config.py becomes part of the archive name, so a
    # readable configuration file is required first.
    if not os.path.exists(os.path.join(cwd, "config.py")):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    sys.path.insert(0, cwd)
    try:
        from config import website_name
    except:
        sys.exit(
            "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    build_dir = os.path.join(cwd, outdir)
    # Archive name: <Site_Name>-build-<YYYY-MM-DD>.zip
    archive_base = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                                str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(archive_base, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended"""
    print("Purging the Blended files!")
    # Fix (DRY): the original repeated the same exists/remove stanza six
    # times; the set of targets is unchanged.
    # Directories created by `blended init` / `blended build`.
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Configuration files (including the bytecode cache and old backup).
    for leftover in ("config.py", "config.pyc", "config.py.oldbak"):
        file_path = os.path.join(cwd, leftover)
        if os.path.exists(file_path):
            os.remove(file_path)
def build_files(outdir):
"""Build the files!"""
# Make sure there is actually a configuration file
config_file_dir = os.path.join(cwd, "config.py")
if not os.path.exists(config_file_dir):
sys.exit(
"There dosen't seem to be a configuration file. Have you run the init command?")
else:
sys.path.insert(0, cwd)
try:
from config import website_name, website_description, website_language, home_page_list
except:
sys.exit(
"ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
try:
from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
except:
website_description_long = ""
website_license = ""
website_url = ""
author_name = ""
author_bio = ""
plugins = []
custom_variables = {}
minify_css = False
minify_js = False
print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
# Create the build folder
build_dir = os.path.join(cwd, outdir)
if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
replace_folder(build_dir)
# Make sure there is actually a header template file
header_file_dir = os.path.join(cwd, "templates", "header.html")
if not os.path.exists(header_file_dir):
sys.exit(
"There dosen't seem to be a header template file. You need one to generate.")
# Make sure there is actually a footer template file
footer_file_dir = os.path.join(cwd, "templates", "footer.html")
if not os.path.exists(footer_file_dir):
sys.exit(
"There dosen't seem to be a footer template file. You need one to generate.")
# Open the header and footer files for reading
header_file = open(header_file_dir, "r")
footer_file = open(footer_file_dir, "r")
# Create the HTML page listing
page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
if not os.path.exists(page_list_item_file):
page_list = '<ul class="page-list">\n'
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
for filename in files:
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_link = ""
else:
subfolder_link = subfolder + "/"
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
newFilename = get_html_filename(filename)
newFilename2 = get_html_clear_filename(filename)
page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
'">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
str(file_modified) + '</span></li>\n'
page_list = page_list + '</ul>'
else:
with open(page_list_item_file, 'r') as f:
page_list_item = f.read()
page_list = ""
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
dirs[:] = [d for d in dirs if "_" not in d]
for filename in files:
p_content = convert_text(os.path.join(root, filename))
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_link = ""
else:
subfolder_link = subfolder + "/"
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
file_modified_day = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
file_modified_year = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
file_modified_month = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
month_name = calendar.month_name[int(file_modified_month)]
newFilename = get_html_filename(filename)
newFilename2 = get_html_clear_filename(filename)
page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
"{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
if home_page_list == "yes" or home_page_list:
# Open the home page file (index.html) for writing
home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
home_working_file.write(header_file.read())
# Make sure there is actually a home page template file
home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
if os.path.exists(home_templ_dir):
home_templ_file = open(home_templ_dir, "r")
home_working_file.write(home_templ_file.read())
else:
print("\nNo home page template file found. Writing page list to index.html")
home_working_file.write(page_list)
home_working_file.write(footer_file.read())
home_working_file.close()
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
dirs[:] = [d for d in dirs if "_" not in d]
for filename in files:
if not filename.startswith("_"):
header_file = open(header_file_dir, "r")
footer_file = open(footer_file_dir, "r")
newFilename = get_html_filename(filename)
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
currents_working_file = open(
os.path.join(cwd, outdir, newFilename), "w")
else:
create_folder(os.path.join(cwd, outdir, subfolder))
currents_working_file = open(os.path.join(
cwd, outdir, subfolder, newFilename), "w")
# Write the header
currents_working_file.write(header_file.read())
text_cont1 = convert_text(os.path.join(root, filename))
if "+++++" in text_cont1.splitlines()[1]:
page_template_file = text_cont1.splitlines()[0]
text_cont1 = text_cont1.replace(
text_cont1.splitlines()[0], "")
text_cont1 = text_cont1.replace(
text_cont1.splitlines()[1], "")
else:
page_template_file = "content_page"
# Write the text content into the content template and onto the
# build file
content_templ_dir = os.path.join(
cwd, "templates", page_template_file + ".html")
if os.path.exists(content_templ_dir):
content_templ_file = open(content_templ_dir, "r")
content_templ_file1 = content_templ_file.read()
content_templ_file2 = content_templ_file1.replace(
"{page_content}", text_cont1)
currents_working_file.write(content_templ_file2)
else:
currents_working_file.write(text_cont1)
# Write the footer to the build file
currents_working_file.write("\n" + footer_file.read())
# Close the build file
currents_working_file.close()
# Find all the nav(something) templates in the `templates` folder and
# Read their content to the dict
navs = {}
for file in os.listdir(os.path.join(cwd, "templates")):
if "nav" in file:
nav_cont = open(os.path.join(cwd, "templates", file), "r")
navs[file.replace(".html", "")] = nav_cont.read()
nav_cont.close()
forbidden_dirs = set(["assets", "templates"])
blended_version_message = "Built with Blended v" + \
str(app_version)
build_date = str(datetime.now().date())
build_time = str(datetime.now().time())
build_datetime = str(datetime.now())
# Replace global variables such as site name and language
for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
dirs[:] = [d for d in dirs if d not in forbidden_dirs]
for filename in files:
if filename != "config.pyc" and filename != "config.py":
newFilename = get_html_clear_filename(filename)
page_file = filename.replace(".html", "")
page_folder = os.path.basename(os.path.dirname(os.path.join(
root, filename))).replace("-", "").replace("_", "").title()
page_folder_orig = os.path.basename(
os.path.dirname(os.path.join(root, filename)))
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, outdir), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_folder = os.path.join(cwd, outdir, filename)
else:
subfolder_folder = os.path.join(
cwd, outdir, subfolder, filename)
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
file_modified_day = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
file_modified_year = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
file_modified_month = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
month_name = calendar.month_name[int(file_modified_month)]
# The Loop!
for line in fileinput.input(subfolder_folder, inplace=1):
for var in custom_variables:
line = line.replace(
"{" + var + "}", custom_variables[var])
if len(plugins) != 0:
for i in range(len(plugins)):
if sys.version_info[0] < 2:
main = importlib.import_module(plugins[i])
elif sys.version_info[0] < 3:
main = __import__(plugins[i])
content = main.main()
line = line.replace(
"{" + plugins[i] + "}", content)
if "{nav" in line:
navname = line.split("{")[1].split("}")[0]
line = line.replace(
"{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
line = line.replace(
"{website_description}", website_description)
line = line.replace(
"{website_description_long}", website_description_long)
line = line.replace("{website_license}", website_license)
line = line.replace("{website_language}", website_language)
line = line.replace("{website_url}", website_url)
line = line.replace("{author_name}", author_name)
line = line.replace("{author_bio}", author_bio)
line = line.replace("{random_number}",
str(randint(0, 100000000)))
line = line.replace("{build_date}", build_date)
line = line.replace("{build_time}", build_time)
line = line.replace("{build_datetime}", build_datetime)
line = line.replace("{page_list}", page_list)
line = line.replace("{page_name}", newFilename)
line = line.replace("{page_filename}", page_file)
line = line.replace("{page_file}", filename)
line = line.replace("{" + filename + "_active}", "active")
if page_folder != outdir.title():
line = line.replace("{page_folder}", page_folder)
else:
line = line.replace("{page_folder}", "")
if page_folder_orig != outdir:
line = line.replace(
"{page_folder_orig}", page_folder_orig)
else:
line = line.replace("{page_folder_orig}", "")
line = line.replace("{page_date}", str(file_modified))
line = line.replace("{page_day}", str(file_modified_day))
line = line.replace("{page_year}", str(file_modified_year))
line = line.replace(
"{page_month}", str(file_modified_month))
line = line.replace(
"{page_month_name}", str(month_name))
line = line.replace("{blended_version}", str(app_version))
line = line.replace(
"{blended_version_message}", blended_version_message)
line = line.replace("{website_name}", website_name)
top = os.path.join(cwd, outdir)
startinglevel = top.count(os.sep)
relative_path = ""
level = root.count(os.sep) - startinglevel
for i in range(level):
relative_path = relative_path + "../"
line = line.replace("{relative_root}", relative_path)
print(line.rstrip('\n'))
fileinput.close()
# Copy the asset folder to the build folder
if os.path.exists(os.path.join(cwd, "templates", "assets")):
if os.path.exists(os.path.join(cwd, outdir, "assets")):
shutil.rmtree(os.path.join(cwd, outdir, "assets"))
shutil.copytree(os.path.join(cwd, "templates", "assets"),
os.path.join(cwd, outdir, "assets"))
for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
for file in files:
if not file.startswith("_"):
if (file.endswith(".sass")) or (file.endswith(".scss")):
sass_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if sass_text != "":
text_file.write(sass.compile(string=sass_text))
else:
print(file + " is empty! Not compiling Sass.")
text_file.close()
if file.endswith(".less"):
less_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if less_text != "":
text_file.write(lesscpy.compile(StringIO(less_text)))
else:
print(file + " is empty! Not compiling Less.")
text_file.close()
if file.endswith(".styl"):
try:
styl_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if styl_text != "":
text_file.write(Stylus().compile(styl_text))
else:
print(file + " is empty! Not compiling Styl.")
text_file.close()
except:
print("Not able to build with Stylus! Is it installed?")
try:
subprocess.call["npm", "install", "-g", "stylus"]
except:
print("NPM (NodeJS) not working. Is it installed?")
if file.endswith(".coffee"):
coffee_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file[:-6] + "js"), "w")
if coffee_text != "":
text_file.write(coffeescript.compile(coffee_text))
else:
print(file + " is empty! Not compiling CoffeeScript.")
text_file.close()
if minify_css:
if file.endswith(".css"):
css_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file), "w")
if css_text != "":
text_file.write(cssmin(css_text))
text_file.close()
if minify_js:
if file.endswith(".js"):
js_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file), "w")
if js_text != "":
text_file.write(jsmin(js_text))
text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
"""Blends the generated files and outputs a HTML website"""
print("Building your Blended files into a website!")
reload(sys)
sys.setdefaultencoding('utf8')
build_files(outdir)
print("The files are built! You can find them in the " + outdir +
"/ directory. Run the view command to see what you have created in a web browser.")
outdir_type = "build"
class Watcher:
"""Watch the specified dirs for changes"""
DIRECTORY_TO_WATCH = os.path.join(cwd, "content")
def __init__(self):
self.observer = Observer()
def run(self):
"""Run the builder on changes"""
event_handler = Handler()
threads = []
paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
for i in paths:
targetPath = str(i)
self.observer.schedule(event_handler, targetPath, recursive=True)
threads.append(self.observer)
self.observer.start()
try:
while True:
time.sleep(5)
except:
self.observer.stop()
print("\nObserver stopped.")
self.observer.join()
class Handler(FileSystemEventHandler):
"""The handler for the file change watcher"""
@staticmethod
def on_any_event(event):
global outdir_type
if event.is_directory:
return None
elif event.event_type == 'created':
# Take any action here when a file is first created.
build_files(outdir_type)
print("%s created" % event.src_path)
elif event.event_type == 'modified':
# Taken any action here when a file is modified.
build_files(outdir_type)
print("%s modified" % event.src_path)
elif event.event_type == 'deleted':
# Taken any action here when a file is modified.
build_files(outdir_type)
print("%s deleted" % event.src_path)
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
"""Blends the generated files and outputs a HTML website on file change"""
print("Building your Blended files into a website!")
global outdir_type
outdir_type = outdir
reload(sys)
sys.setdefaultencoding('utf8')
build_files(outdir)
print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
w = Watcher()
w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
"""Opens the built index.html file in a web browser"""
index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
if os.path.exists(index_path):
webbrowser.open('file://' + index_path)
else:
print("The index.html file could not be found in the " + outdir +
"/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
if __name__ == '__main__':
cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | build_files | python | def build_files(outdir):
# Make sure there is actually a configuration file
config_file_dir = os.path.join(cwd, "config.py")
if not os.path.exists(config_file_dir):
sys.exit(
"There dosen't seem to be a configuration file. Have you run the init command?")
else:
sys.path.insert(0, cwd)
try:
from config import website_name, website_description, website_language, home_page_list
except:
sys.exit(
"ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
try:
from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
except:
website_description_long = ""
website_license = ""
website_url = ""
author_name = ""
author_bio = ""
plugins = []
custom_variables = {}
minify_css = False
minify_js = False
print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
# Create the build folder
build_dir = os.path.join(cwd, outdir)
if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
replace_folder(build_dir)
# Make sure there is actually a header template file
header_file_dir = os.path.join(cwd, "templates", "header.html")
if not os.path.exists(header_file_dir):
sys.exit(
"There dosen't seem to be a header template file. You need one to generate.")
# Make sure there is actually a footer template file
footer_file_dir = os.path.join(cwd, "templates", "footer.html")
if not os.path.exists(footer_file_dir):
sys.exit(
"There dosen't seem to be a footer template file. You need one to generate.")
# Open the header and footer files for reading
header_file = open(header_file_dir, "r")
footer_file = open(footer_file_dir, "r")
# Create the HTML page listing
page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
if not os.path.exists(page_list_item_file):
page_list = '<ul class="page-list">\n'
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
for filename in files:
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_link = ""
else:
subfolder_link = subfolder + "/"
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
newFilename = get_html_filename(filename)
newFilename2 = get_html_clear_filename(filename)
page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
'">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
str(file_modified) + '</span></li>\n'
page_list = page_list + '</ul>'
else:
with open(page_list_item_file, 'r') as f:
page_list_item = f.read()
page_list = ""
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
dirs[:] = [d for d in dirs if "_" not in d]
for filename in files:
p_content = convert_text(os.path.join(root, filename))
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_link = ""
else:
subfolder_link = subfolder + "/"
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
file_modified_day = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
file_modified_year = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
file_modified_month = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
month_name = calendar.month_name[int(file_modified_month)]
newFilename = get_html_filename(filename)
newFilename2 = get_html_clear_filename(filename)
page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
"{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
if home_page_list == "yes" or home_page_list:
# Open the home page file (index.html) for writing
home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
home_working_file.write(header_file.read())
# Make sure there is actually a home page template file
home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
if os.path.exists(home_templ_dir):
home_templ_file = open(home_templ_dir, "r")
home_working_file.write(home_templ_file.read())
else:
print("\nNo home page template file found. Writing page list to index.html")
home_working_file.write(page_list)
home_working_file.write(footer_file.read())
home_working_file.close()
for root, dirs, files in os.walk(os.path.join(cwd, "content")):
dirs[:] = [d for d in dirs if "_" not in d]
for filename in files:
if not filename.startswith("_"):
header_file = open(header_file_dir, "r")
footer_file = open(footer_file_dir, "r")
newFilename = get_html_filename(filename)
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, "content"), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
currents_working_file = open(
os.path.join(cwd, outdir, newFilename), "w")
else:
create_folder(os.path.join(cwd, outdir, subfolder))
currents_working_file = open(os.path.join(
cwd, outdir, subfolder, newFilename), "w")
# Write the header
currents_working_file.write(header_file.read())
text_cont1 = convert_text(os.path.join(root, filename))
if "+++++" in text_cont1.splitlines()[1]:
page_template_file = text_cont1.splitlines()[0]
text_cont1 = text_cont1.replace(
text_cont1.splitlines()[0], "")
text_cont1 = text_cont1.replace(
text_cont1.splitlines()[1], "")
else:
page_template_file = "content_page"
# Write the text content into the content template and onto the
# build file
content_templ_dir = os.path.join(
cwd, "templates", page_template_file + ".html")
if os.path.exists(content_templ_dir):
content_templ_file = open(content_templ_dir, "r")
content_templ_file1 = content_templ_file.read()
content_templ_file2 = content_templ_file1.replace(
"{page_content}", text_cont1)
currents_working_file.write(content_templ_file2)
else:
currents_working_file.write(text_cont1)
# Write the footer to the build file
currents_working_file.write("\n" + footer_file.read())
# Close the build file
currents_working_file.close()
# Find all the nav(something) templates in the `templates` folder and
# Read their content to the dict
navs = {}
for file in os.listdir(os.path.join(cwd, "templates")):
if "nav" in file:
nav_cont = open(os.path.join(cwd, "templates", file), "r")
navs[file.replace(".html", "")] = nav_cont.read()
nav_cont.close()
forbidden_dirs = set(["assets", "templates"])
blended_version_message = "Built with Blended v" + \
str(app_version)
build_date = str(datetime.now().date())
build_time = str(datetime.now().time())
build_datetime = str(datetime.now())
# Replace global variables such as site name and language
for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
dirs[:] = [d for d in dirs if d not in forbidden_dirs]
for filename in files:
if filename != "config.pyc" and filename != "config.py":
newFilename = get_html_clear_filename(filename)
page_file = filename.replace(".html", "")
page_folder = os.path.basename(os.path.dirname(os.path.join(
root, filename))).replace("-", "").replace("_", "").title()
page_folder_orig = os.path.basename(
os.path.dirname(os.path.join(root, filename)))
top = os.path.dirname(os.path.join(root, filename))
top2 = top.replace(os.path.join(cwd, outdir), "", 1)
if platform != "win32":
subfolder = top2.replace("/", "", 1)
else:
subfolder = top2.replace("\\", "", 1)
if subfolder == "":
subfolder_folder = os.path.join(cwd, outdir, filename)
else:
subfolder_folder = os.path.join(
cwd, outdir, subfolder, filename)
file_modified = time.ctime(
os.path.getmtime(os.path.join(root, filename)))
file_modified_day = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
file_modified_year = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
file_modified_month = str(datetime.strptime(
file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
month_name = calendar.month_name[int(file_modified_month)]
# The Loop!
for line in fileinput.input(subfolder_folder, inplace=1):
for var in custom_variables:
line = line.replace(
"{" + var + "}", custom_variables[var])
if len(plugins) != 0:
for i in range(len(plugins)):
if sys.version_info[0] < 2:
main = importlib.import_module(plugins[i])
elif sys.version_info[0] < 3:
main = __import__(plugins[i])
content = main.main()
line = line.replace(
"{" + plugins[i] + "}", content)
if "{nav" in line:
navname = line.split("{")[1].split("}")[0]
line = line.replace(
"{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
line = line.replace(
"{website_description}", website_description)
line = line.replace(
"{website_description_long}", website_description_long)
line = line.replace("{website_license}", website_license)
line = line.replace("{website_language}", website_language)
line = line.replace("{website_url}", website_url)
line = line.replace("{author_name}", author_name)
line = line.replace("{author_bio}", author_bio)
line = line.replace("{random_number}",
str(randint(0, 100000000)))
line = line.replace("{build_date}", build_date)
line = line.replace("{build_time}", build_time)
line = line.replace("{build_datetime}", build_datetime)
line = line.replace("{page_list}", page_list)
line = line.replace("{page_name}", newFilename)
line = line.replace("{page_filename}", page_file)
line = line.replace("{page_file}", filename)
line = line.replace("{" + filename + "_active}", "active")
if page_folder != outdir.title():
line = line.replace("{page_folder}", page_folder)
else:
line = line.replace("{page_folder}", "")
if page_folder_orig != outdir:
line = line.replace(
"{page_folder_orig}", page_folder_orig)
else:
line = line.replace("{page_folder_orig}", "")
line = line.replace("{page_date}", str(file_modified))
line = line.replace("{page_day}", str(file_modified_day))
line = line.replace("{page_year}", str(file_modified_year))
line = line.replace(
"{page_month}", str(file_modified_month))
line = line.replace(
"{page_month_name}", str(month_name))
line = line.replace("{blended_version}", str(app_version))
line = line.replace(
"{blended_version_message}", blended_version_message)
line = line.replace("{website_name}", website_name)
top = os.path.join(cwd, outdir)
startinglevel = top.count(os.sep)
relative_path = ""
level = root.count(os.sep) - startinglevel
for i in range(level):
relative_path = relative_path + "../"
line = line.replace("{relative_root}", relative_path)
print(line.rstrip('\n'))
fileinput.close()
# Copy the asset folder to the build folder
if os.path.exists(os.path.join(cwd, "templates", "assets")):
if os.path.exists(os.path.join(cwd, outdir, "assets")):
shutil.rmtree(os.path.join(cwd, outdir, "assets"))
shutil.copytree(os.path.join(cwd, "templates", "assets"),
os.path.join(cwd, outdir, "assets"))
for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
for file in files:
if not file.startswith("_"):
if (file.endswith(".sass")) or (file.endswith(".scss")):
sass_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if sass_text != "":
text_file.write(sass.compile(string=sass_text))
else:
print(file + " is empty! Not compiling Sass.")
text_file.close()
if file.endswith(".less"):
less_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if less_text != "":
text_file.write(lesscpy.compile(StringIO(less_text)))
else:
print(file + " is empty! Not compiling Less.")
text_file.close()
if file.endswith(".styl"):
try:
styl_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(
root, file[:-4] + "css"), "w")
if styl_text != "":
text_file.write(Stylus().compile(styl_text))
else:
print(file + " is empty! Not compiling Styl.")
text_file.close()
except:
print("Not able to build with Stylus! Is it installed?")
try:
subprocess.call["npm", "install", "-g", "stylus"]
except:
print("NPM (NodeJS) not working. Is it installed?")
if file.endswith(".coffee"):
coffee_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file[:-6] + "js"), "w")
if coffee_text != "":
text_file.write(coffeescript.compile(coffee_text))
else:
print(file + " is empty! Not compiling CoffeeScript.")
text_file.close()
if minify_css:
if file.endswith(".css"):
css_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file), "w")
if css_text != "":
text_file.write(cssmin(css_text))
text_file.close()
if minify_js:
if file.endswith(".js"):
js_text = open(os.path.join(root, file)).read()
text_file = open(os.path.join(root, file), "w")
if js_text != "":
text_file.write(jsmin(js_text))
text_file.close() | Build the files! | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L348-L713 | [
"def replace_folder(path):\n \"\"\"If the specified folder exists, it is deleted and recreated\"\"\"\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n else:\n os.makedirs(path)\n"
] | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
try:
app_version = pkg_resources.require("blended")[0].version
app_version = app_version[:3]
except:
app_version = "NOTSET"
print("WARNING: app_version not set.\n")
@click.group()
def cli():
"""Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
"""Prints info about Blended"""
print("You are running Blended v" + app_version)
print("The current working directory is " + cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
help='The GitHub repository name.')
def install_template(username, repo):
"""Installs a Blended template from GitHub"""
print("Installing template from " + username + "/" + repo)
dpath = os.path.join(cwd, "templates")
getunzipped(username, repo, dpath)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
"""Imports A WordPress export and converts it to a Blended site"""
print("\nBlended: Static Website Generator -\n")
checkConfig()
print("Importing from WordPress...")
wp = parseXML(filepath)
wname = wp.rss.channel.title.cdata
wdesc = wp.rss.channel.description.cdata
wlan = wp.rss.channel.language.cdata
wurl = wp.rss.channel.link.cdata
aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname,
wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
for item in wp.rss.channel.item:
with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
wfile.write(item.content_encoded.cdata.strip())
print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site.

    Parses the Blogger XML export at ``filepath``, seeds config.py from
    the feed metadata, and writes each post entry's HTML content into
    ``content/<Title_With_Underscores>.html``.
    """
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    blogger = parseXML(filepath)
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    # Only feed entries whose id marks them as posts are imported
    for entry in blogger.feed.entry:
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub.

    Pip-installs (upgrading when already present) the plugin package
    straight from the GitHub repository ``username/repo``.
    """
    print("Installing plugin from " + username + "/" + repo)
    # Use the https transport: GitHub no longer serves the unauthenticated
    # git:// protocol, so git+git:// URLs always fail now.
    # NOTE(review): pip.main() was removed from pip's public API in pip 10;
    # consider subprocess with [sys.executable, "-m", "pip", ...] instead.
    pip.main(['install', '-U', "git+https://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website.

    Prompts interactively for the site metadata, then creates the
    Blended folder layout and writes the answers into config.py.
    """
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # input() on Python 3, raw_input() on Python 2
    if (sys.version_info > (3, 0)):
        wname = input("Website Name: ")
        wdesc = input("Website Description: ")
        wlan = input("Website Language: ")
        wlic = input("Website License: ")
        aname = input("Author(s) Name(s): ")
    else:
        wname = raw_input("Website Name: ")
        wdesc = raw_input("Website Description: ")
        wlan = raw_input("Website Language: ")
        wlic = raw_input("Website License: ")
        aname = raw_input("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload the contents of ``path`` to an FTP server.

    Blended's own source artifacts (config.py, config.pyc, templates/,
    content/) are skipped. Files are uploaded with STOR; directories are
    created with MKD (a 550 "already exists" reply is ignored) and then
    recursed into.

    :param ftp: a connected and logged-in ``ftplib.FTP`` instance
    :param path: local directory whose contents should be uploaded
    """
    for name in os.listdir(path):
        # Never upload Blended's sources, only the built output
        if name in ("config.py", "config.pyc", "templates", "content"):
            continue
        localpath = os.path.join(path, name)
        if os.path.isfile(localpath):
            print("STOR", name, localpath)
            # Context manager closes the handle even when the transfer
            # raises (the original code leaked the open file object).
            with open(localpath, 'rb') as upload:
                ftp.storbinary('STOR ' + name, upload)
        elif os.path.isdir(localpath):
            print("MKD", name)
            try:
                ftp.mkd(name)
            # ignore "directory already exists"
            except error_perm as e:
                if not e.args[0].startswith('550'):
                    raise
            print("CWD", name)
            ftp.cwd(name)
            placeFiles(ftp, localpath)
            print("CWD", "..")
            ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP.

    Reads server/credentials from the project's config.py and mirrors the
    contents of ``outdir`` onto the server via ``placeFiles``.
    """
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the user's project dir, not on sys.path
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        filenameCV = os.path.join(cwd, outdir)
        try:
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        # NOTE(review): bare except hides the real failure (auth, missing
        # remote path, network); consider catching ftplib.all_errors and
        # reporting the underlying error.
        except:
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Delete the output directory and everything inside it"""
    print("Removing the built files!")
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip.

    The archive is written next to the project as
    ``<website_name>-build-<YYYY-MM-DD>.zip`` (website_name from
    config.py, spaces replaced with underscores).
    """
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the user's project dir, not on sys.path
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Folder to archive and archive base name (shutil appends ".zip")
        build_dir = os.path.join(cwd, outdir)
        zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                               str(datetime.now().date()))
        if os.path.exists(build_dir):
            shutil.make_archive(zip_dir, 'zip', build_dir)
        else:
            print("The " + outdir +
                  "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended.

    Deletes the templates/, content/ and build/ folders plus the
    config.py / config.pyc / config.py.oldbak files, when present.
    """
    print("Purging the Blended files!")
    # Generated directories (the original repeated this block per folder)
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated configuration files (incl. the backup left by re-init)
    for config_name in ("config.py", "config.pyc", "config.py.oldbak"):
        config_path = os.path.join(cwd, config_name)
        if os.path.exists(config_path):
            os.remove(config_path)
def convert_text(filename):
    """Convert one content file to an HTML fragment.

    Dispatch is by file extension: Markdown (.md), Word (.docx), Textile
    (.tile), Jade (.jade) and reStructuredText (.rst) sources are compiled
    to HTML, while .html and .txt files are passed through verbatim. Any
    other extension is reported and yields "NULL".

    Fixes over the original: file handles are closed via context managers,
    and dispatch uses endswith() instead of substring containment (a name
    like "notes.md.txt" previously hit the Markdown branch).

    :param filename: path to the content file
    :return: the converted content with two trailing newlines appended
    """
    if filename.endswith(".docx"):
        # mammoth needs the raw bytes of the document, not decoded text.
        # NOTE(review): the original joined cwd/content onto an already
        # absolute path, which os.path.join collapses back to the absolute
        # path, so opening `filename` directly is equivalent for the
        # absolute paths build_files() passes in.
        with open(filename, "rb") as docx_file:
            converted = "\n" + mammoth.convert_to_html(docx_file).value + "\n"
    elif filename.endswith(".md"):
        with open(filename, "r") as source:
            converted = "\n" + markdown.markdown(source.read()) + "\n"
    elif filename.endswith(".tile"):
        with open(filename, "r") as source:
            converted = "\n" + textile.textile(source.read()) + "\n"
    elif filename.endswith(".jade"):
        with open(filename, "r") as source:
            converted = "\n" + pyjade.simple_convert(source.read()) + "\n"
    elif filename.endswith(".rst"):
        with open(filename, "r") as source:
            converted = "\n" + \
                publish_parts(source.read(), writer_name='html')[
                    'html_body'] + "\n"
    elif filename.endswith(".html") or filename.endswith(".txt"):
        # Pass-through formats: already HTML / plain text
        with open(filename, "r") as source:
            converted = source.read()
    else:
        print(filename + " is not a valid file type!")
        converted = "NULL"
    return converted + "\n\n"
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website.

    :param outdir: output folder name, relative to the working directory
    """
    print("Building your Blended files into a website!")
    # sys.setdefaultencoding only exists on Python 2 (after reload(sys));
    # the original called it unconditionally, which raises NameError on
    # Python 3 even though the rest of the tool supports Python 3.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
outdir_type = "build"  # default output folder; overwritten by `interactive` and read by Handler
class Watcher:
    """Watch the content/ and templates/ directories for changes"""

    # NOTE(review): never read -- run() builds its own path list below.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes"""
        event_handler = Handler()
        threads = []
        # Watch both the content sources and the template sources
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for i in paths:
            targetPath = str(i)
            self.observer.schedule(event_handler, targetPath, recursive=True)
            threads.append(self.observer)
        self.observer.start()
        try:
            # Keep the main thread alive; the observer runs on its own thread
            while True:
                time.sleep(5)
        # NOTE(review): the bare except is what lets CTRL+C
        # (KeyboardInterrupt) fall through here and stop the watcher.
        except:
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Watchdog handler that rebuilds the site on any file change"""

    @staticmethod
    def on_any_event(event):
        global outdir_type
        if event.is_directory:
            return None
        # Rebuild for the three event kinds the CLI cares about, then
        # report which file triggered the rebuild.
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change.

    Builds once, then watches content/ and templates/ and rebuilds on any
    change until interrupted with CTRL+C.

    :param outdir: output folder name, relative to the working directory
    """
    print("Building your Blended files into a website!")
    # Handler() reads the module-global outdir_type when rebuilding
    global outdir_type
    outdir_type = outdir
    # sys.setdefaultencoding only exists on Python 2 (after reload(sys));
    # calling it unconditionally raises NameError on Python 3.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Open the built index.html in the default web browser"""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
# Script entry point: delegate to the click command group.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | build | python | def build(outdir):
print("Building your Blended files into a website!")
reload(sys)
sys.setdefaultencoding('utf8')
build_files(outdir)
print("The files are built! You can find them in the " + outdir +
"/ directory. Run the view command to see what you have created in a web browser.") | Blends the generated files and outputs a HTML website | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L718-L729 | [
"def build_files(outdir):\n \"\"\"Build the files!\"\"\"\n # Make sure there is actually a configuration file\n config_file_dir = os.path.join(cwd, \"config.py\")\n if not os.path.exists(config_file_dir):\n sys.exit(\n \"There dosen't seem to be a configuration file. Have you run the i... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()

# Determine the installed Blended version (first three characters of the
# version string, e.g. "1.2"). Catch Exception -- not a bare `except:` --
# so SystemExit/KeyboardInterrupt raised during import are not swallowed.
try:
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]
except Exception:
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
@click.group()
def cli():
"""Blended: Static Website Generator"""
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
"""Prints info about Blended"""
print("You are running Blended v" + app_version)
print("The current working directory is " + cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
help='The GitHub repository name.')
def install_template(username, repo):
"""Installs a Blended template from GitHub"""
print("Installing template from " + username + "/" + repo)
dpath = os.path.join(cwd, "templates")
getunzipped(username, repo, dpath)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
"""Imports A WordPress export and converts it to a Blended site"""
print("\nBlended: Static Website Generator -\n")
checkConfig()
print("Importing from WordPress...")
wp = parseXML(filepath)
wname = wp.rss.channel.title.cdata
wdesc = wp.rss.channel.description.cdata
wlan = wp.rss.channel.language.cdata
wurl = wp.rss.channel.link.cdata
aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname,
wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
for item in wp.rss.channel.item:
with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
wfile.write(item.content_encoded.cdata.strip())
print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
"""Imports A Blogger export and converts it to a Blended site"""
print("\nBlended: Static Website Generator -\n")
checkConfig()
print("Importing from Blogger...")
blogger = parseXML(filepath)
wname = blogger.feed.title.cdata
aname = blogger.feed.author.name.cdata.strip()
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname, aname=aname)
for entry in blogger.feed.entry:
if "post" in entry.id.cdata:
with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
wfile.write(entry.content.cdata.strip())
print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub.

    Pip-installs (upgrading when already present) the plugin package
    straight from the GitHub repository ``username/repo``.
    """
    print("Installing plugin from " + username + "/" + repo)
    # Use the https transport: GitHub no longer serves the unauthenticated
    # git:// protocol, so git+git:// URLs always fail now.
    # NOTE(review): pip.main() was removed from pip's public API in pip 10;
    # consider subprocess with [sys.executable, "-m", "pip", ...] instead.
    pip.main(['install', '-U', "git+https://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
"""Initiates a new website"""
print("Blended: Static Website Generator -\n")
checkConfig()
if (sys.version_info > (3, 0)):
wname = input("Website Name: ")
wdesc = input("Website Description: ")
wlan = input("Website Language: ")
wlic = input("Website License: ")
aname = input("Author(s) Name(s): ")
else:
wname = raw_input("Website Name: ")
wdesc = raw_input("Website Description: ")
wlan = raw_input("Website Language: ")
wlic = raw_input("Website License: ")
aname = raw_input("Author(s) Name(s): ")
createBlendedFolders()
# Populate the configuration file
createConfig(app_version=app_version, wname=wname,
wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload the contents of ``path`` to an FTP server.

    Blended's own source artifacts (config.py, config.pyc, templates/,
    content/) are skipped. Files are uploaded with STOR; directories are
    created with MKD (a 550 "already exists" reply is ignored) and then
    recursed into.

    :param ftp: a connected and logged-in ``ftplib.FTP`` instance
    :param path: local directory whose contents should be uploaded
    """
    for name in os.listdir(path):
        # Never upload Blended's sources, only the built output
        if name in ("config.py", "config.pyc", "templates", "content"):
            continue
        localpath = os.path.join(path, name)
        if os.path.isfile(localpath):
            print("STOR", name, localpath)
            # Context manager closes the handle even when the transfer
            # raises (the original code leaked the open file object).
            with open(localpath, 'rb') as upload:
                ftp.storbinary('STOR ' + name, upload)
        elif os.path.isdir(localpath):
            print("MKD", name)
            try:
                ftp.mkd(name)
            # ignore "directory already exists"
            except error_perm as e:
                if not e.args[0].startswith('550'):
                    raise
            print("CWD", name)
            ftp.cwd(name)
            placeFiles(ftp, localpath)
            print("CWD", "..")
            ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
"""Upload the built website to FTP"""
print("Uploading the files in the " + outdir + "/ directory!\n")
# Make sure there is actually a configuration file
config_file_dir = os.path.join(cwd, "config.py")
if not os.path.exists(config_file_dir):
sys.exit(
"There dosen't seem to be a configuration file. Have you run the init command?")
else:
sys.path.insert(0, cwd)
try:
from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
except:
sys.exit(
"The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
server = ftp_server
username = ftp_username
password = ftp_password
port = ftp_port
ftp = FTP()
ftp.connect(server, port)
ftp.login(username, password)
filenameCV = os.path.join(cwd, outdir)
try:
ftp.cwd(ftp_upload_path)
placeFiles(ftp, filenameCV)
except:
ftp.quit()
sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
ftp.quit()
print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
"""Removes all built files"""
print("Removing the built files!")
# Remove the build folder
build_dir = os.path.join(cwd, outdir)
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
"""Packages the build folder into a zip"""
print("Zipping the built files!")
config_file_dir = os.path.join(cwd, "config.py")
if not os.path.exists(config_file_dir):
sys.exit(
"There dosen't seem to be a configuration file. Have you run the init command?")
else:
sys.path.insert(0, cwd)
try:
from config import website_name
except:
sys.exit(
"Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
# Remove the build folder
build_dir = os.path.join(cwd, outdir)
zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
str(datetime.now().date()))
if os.path.exists(build_dir):
shutil.make_archive(zip_dir, 'zip', build_dir)
else:
print("The " + outdir +
"/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended.

    Deletes the templates/, content/ and build/ folders plus the
    config.py / config.pyc / config.py.oldbak files, when present.
    """
    print("Purging the Blended files!")
    # Generated directories (the original repeated this block per folder)
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated configuration files (incl. the backup left by re-init)
    for config_name in ("config.py", "config.pyc", "config.py.oldbak"):
        config_path = os.path.join(cwd, config_name)
        if os.path.exists(config_path):
            os.remove(config_path)
def convert_text(filename):
    """Convert one content file to an HTML fragment.

    Dispatch is by file extension: Markdown (.md), Word (.docx), Textile
    (.tile), Jade (.jade) and reStructuredText (.rst) sources are compiled
    to HTML, while .html and .txt files are passed through verbatim. Any
    other extension is reported and yields "NULL".

    Fixes over the original: file handles are closed via context managers,
    and dispatch uses endswith() instead of substring containment (a name
    like "notes.md.txt" previously hit the Markdown branch).

    :param filename: path to the content file
    :return: the converted content with two trailing newlines appended
    """
    if filename.endswith(".docx"):
        # mammoth needs the raw bytes of the document, not decoded text.
        # NOTE(review): the original joined cwd/content onto an already
        # absolute path, which os.path.join collapses back to the absolute
        # path, so opening `filename` directly is equivalent for the
        # absolute paths build_files() passes in.
        with open(filename, "rb") as docx_file:
            converted = "\n" + mammoth.convert_to_html(docx_file).value + "\n"
    elif filename.endswith(".md"):
        with open(filename, "r") as source:
            converted = "\n" + markdown.markdown(source.read()) + "\n"
    elif filename.endswith(".tile"):
        with open(filename, "r") as source:
            converted = "\n" + textile.textile(source.read()) + "\n"
    elif filename.endswith(".jade"):
        with open(filename, "r") as source:
            converted = "\n" + pyjade.simple_convert(source.read()) + "\n"
    elif filename.endswith(".rst"):
        with open(filename, "r") as source:
            converted = "\n" + \
                publish_parts(source.read(), writer_name='html')[
                    'html_body'] + "\n"
    elif filename.endswith(".html") or filename.endswith(".txt"):
        # Pass-through formats: already HTML / plain text
        with open(filename, "r") as source:
            converted = source.read()
    else:
        print(filename + " is not a valid file type!")
        converted = "NULL"
    return converted + "\n\n"
def build_files(outdir):
    """Compile the whole site into ``outdir``.

    Loads config.py from the working directory, builds the page listing,
    renders every file under content/ through the templates in
    templates/, substitutes the {placeholder} variables in the generated
    pages, and finally copies/compiles the asset pipeline
    (Sass/Less/Stylus/CoffeeScript, optional CSS/JS minification).

    :param outdir: output folder name, relative to the working directory
    """
    # --- Configuration ----------------------------------------------------
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the user's project dir, not on sys.path
        sys.path.insert(0, cwd)
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Optional values fall back to harmless defaults for old configs
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder
    build_dir = os.path.join(cwd, outdir)
    # Guard: refuse to recreate the folder when outdir contains a dot
    # (e.g. "." or ".."). NOTE(review): the ".."/"..."/etc. checks are
    # redundant -- '"." not in outdir' already excludes all of them.
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    # NOTE(review): these handles are re-opened per rendered page below
    # and are never explicitly closed.
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # --- Page listing -----------------------------------------------------
    # Create the HTML page listing: a plain <ul> by default, or rendered
    # through templates/page_list_item.html when that template exists.
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                # Subfolder of content/ the file lives in ("" = top level)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders with "_" in the name are excluded from the listing
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                # Modification time, split into day/month/year pieces for
                # the per-item template placeholders
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    # --- Home page ----------------------------------------------------------
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # --- Render content pages -----------------------------------------------
    # Files starting with "_" and folders containing "_" are skipped.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # Front matter: when the second line contains "+++++", the
                # first line names the template to render this page with.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # --- Placeholder substitution --------------------------------------------
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop! fileinput with inplace=1 redirects print() into
                # the file, so each print below rewrites the current line.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # NOTE(review): on Python 3 neither branch runs
                            # (version_info[0] >= 3), so `main` is unbound
                            # and using plugins raises NameError.
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    # Inject the matching nav template for {nav...} tokens
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root}: "../" repeated once per directory
                    # level below the build root
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # --- Assets ---------------------------------------------------------------
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile preprocessor sources in-place next to their originals;
        # files whose names start with "_" (partials) are skipped.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # NOTE(review): subprocess.call[...] indexes
                                # the function instead of calling it -- this
                                # raises TypeError instead of running npm.
                                subprocess.call["npm", "install", "-g", "stylus"]
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ directories and rebuild on change.

    A single watchdog ``Observer`` thread is scheduled on both directories;
    the actual rebuild is performed by :class:`Handler`.
    """

    # NOTE(review): only content/ is named here and run() never reads this
    # attribute; kept for backward compatibility.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        # The observer thread that dispatches filesystem events.
        self.observer = Observer()

    def run(self):
        """Run the builder on changes until the user presses Ctrl+C."""
        event_handler = Handler()
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for watched_path in paths:
            self.observer.schedule(event_handler, str(watched_path), recursive=True)
        self.observer.start()
        try:
            # Idle loop: the observer thread does the work in the background.
            while True:
                time.sleep(5)
        except KeyboardInterrupt:
            # Only Ctrl+C should stop the watch loop; the original bare
            # `except:` also swallowed SystemExit and genuine errors.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher.

    Any create/modify/delete of a watched file triggers a full rebuild of
    the site into the module-level ``outdir_type`` directory.
    """

    @staticmethod
    def on_any_event(event):
        """Rebuild the site and report the event, ignoring directory events."""
        global outdir_type
        if event.is_directory:
            return None
        # The three event types previously had three copy-pasted branches;
        # they all did exactly the same thing, so handle them uniformly.
        if event.event_type in ('created', 'modified', 'deleted'):
            build_files(outdir_type)
            print("%s %s" % (event.src_path, event.event_type))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    global outdir_type
    outdir_type = outdir
    # Python 2 only: reload(sys) re-exposes setdefaultencoding so the default
    # string encoding can be forced to UTF-8. Neither name exists on
    # Python 3 (str is already unicode), where the original code crashed
    # with a NameError before ever building.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    # Guard clause: bail out with a hint when nothing has been built yet.
    if not os.path.exists(index_path):
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
        return
    webbrowser.open('file://' + index_path)
if __name__ == '__main__':
    # Allow running the module directly (e.g. `python -m blended.__main__`).
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | interactive | python | def interactive(outdir):
print("Building your Blended files into a website!")
global outdir_type
outdir_type = outdir
reload(sys)
sys.setdefaultencoding('utf8')
build_files(outdir)
print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
w = Watcher()
w.run() | Blends the generated files and outputs a HTML website on file change | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L792-L808 | [
"def build_files(outdir):\n \"\"\"Build the files!\"\"\"\n # Make sure there is actually a configuration file\n config_file_dir = os.path.join(cwd, \"config.py\")\n if not os.path.exists(config_file_dir):\n sys.exit(\n \"There dosen't seem to be a configuration file. Have you run the i... | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()

try:
    # Ask the installed distribution for its version and keep only the
    # leading "X.Y" (first three characters).
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]
except Exception:
    # Narrowed from a bare except so Ctrl+C still interrupts start-up.
    # Happens when running from a source checkout that is not pip-installed.
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
@click.group()
def cli():
    """Blended: Static Website Generator"""
    # Root click command group; subcommands attach themselves below via
    # @cli.command(...). The body is intentionally empty.
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Report the running Blended version and the active working directory."""
    for message in ("You are running Blended v" + app_version,
                    "The current working directory is " + cwd):
        print(message)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Download a template repository from GitHub and unpack it into templates/."""
    print("Installing template from " + username + "/" + repo)
    getunzipped(username, repo, os.path.join(cwd, "templates"))
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WordPress (WXR) XML export; the site metadata lives under
    # rss.channel, the posts/pages under rss.channel.item.
    wp = parseXML(filepath)
    wname = wp.rss.channel.title.cdata        # site title
    wdesc = wp.rss.channel.description.cdata  # site tagline/description
    wlan = wp.rss.channel.language.cdata      # e.g. "en-US"
    wurl = wp.rss.channel.link.cdata          # original site URL
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # Write each exported item as an HTML content file; spaces in titles
    # become underscores so they are filesystem-safe.
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from Blogger...")
    feed = parseXML(filepath).feed
    site_title = feed.title.cdata
    author = feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=site_title, aname=author)
    content_dir = os.path.join(cwd, "content")
    for entry in feed.entry:
        # Blogger exports mix posts, comments and settings; keep posts only.
        if "post" not in entry.id.cdata:
            continue
        target = os.path.join(content_dir, entry.title.cdata.replace(" ", "_") + ".html")
        with open(target, 'w') as out_file:
            out_file.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub by pip-installing the repository."""
    print("Installing plugin from " + username + "/" + repo)
    # GitHub permanently disabled the unauthenticated git:// protocol, so the
    # VCS URL must use git+https:// rather than the original git+git://.
    pip.main(['install', '-U', "git+https://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website"""
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # raw_input was renamed to input in Python 3, hence the version switch.
    if (sys.version_info > (3, 0)):
        wname = input("Website Name: ")
        wdesc = input("Website Description: ")
        wlan = input("Website Language: ")
        wlic = input("Website License: ")
        aname = input("Author(s) Name(s): ")
    else:
        wname = raw_input("Website Name: ")
        wdesc = raw_input("Website Description: ")
        wlan = raw_input("Website Language: ")
        wlic = raw_input("Website License: ")
        aname = raw_input("Author(s) Name(s): ")
    # Scaffold content/ and templates/ in the working directory.
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Upload the built files to FTP.

    Recursively mirrors the local directory `path` into the server's current
    working directory: files are STORed, sub-directories are created (an
    already-existing directory is tolerated) and descended into. Generator
    inputs (config.py/.pyc, templates/, content/) are skipped.
    """
    for name in os.listdir(path):
        if name != "config.py" and name != "config.pyc" and name != "templates" and name != "content":
            localpath = os.path.join(path, name)
            if os.path.isfile(localpath):
                print("STOR", name, localpath)
                ftp.storbinary('STOR ' + name, open(localpath, 'rb'))
            elif os.path.isdir(localpath):
                print("MKD", name)
                try:
                    ftp.mkd(name)
                # ignore "directory already exists"
                except error_perm as e:
                    # FTP reply 550 means the directory exists; anything
                    # else is a genuine permission error, so re-raise.
                    if not e.args[0].startswith('550'):
                        raise
                print("CWD", name)
                ftp.cwd(name)
                placeFiles(ftp, localpath)
                # Step back to the parent so sibling entries land correctly.
                print("CWD", "..")
                ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP.

    Reads the server credentials from config.py in the working directory and
    mirrors the output directory into ftp_upload_path on the server.
    """
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the working directory, so make it importable.
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        # Local directory to mirror onto the server.
        filenameCV = os.path.join(cwd, outdir)
        try:
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        except:
            # Best-effort cleanup before exiting with a hint.
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Delete the generated output directory, if it exists."""
    print("Removing the built files!")
    target_dir = os.path.join(cwd, outdir)
    if not os.path.exists(target_dir):
        return
    shutil.rmtree(target_dir)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Locate the build folder to archive.
        build_dir = os.path.join(cwd, outdir)
        # Archive name: <Site_Name>-build-<ISO date>; make_archive adds ".zip".
        zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                               str(datetime.now().date()))
        if os.path.exists(build_dir):
            shutil.make_archive(zip_dir, 'zip', build_dir)
        else:
            print("The " + outdir +
                  "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Removes all files generated by Blended.

    Deletes the templates/, content/ and build/ folders plus the generated
    configuration files (including the compiled and backup copies) from the
    working directory. Anything that does not exist is silently skipped.
    """
    print("Purging the Blended files!")
    # Generated directories (was six copy-pasted exists/remove pairs).
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated configuration files.
    for config_name in ("config.py", "config.pyc", "config.py.oldbak"):
        config_path = os.path.join(cwd, config_name)
        if os.path.exists(config_path):
            os.remove(config_path)
def convert_text(filename):
    """Convert the post/page content using the converters.

    `filename` is the path of a content file; the converter is chosen by
    substring-matching the extension in the path (kept from the original to
    preserve dispatch order). Returns the converted HTML followed by two
    newlines, or "NULL\n\n" for unsupported types.

    Fixes over the original: the file handle is no longer leaked (it was
    opened unconditionally and never closed), and the .docx branch no longer
    re-joins the already-complete path with cwd/content.
    """
    if ".md" in filename:
        with open(filename, "r") as source:
            text_cont1 = "\n" + markdown.markdown(source.read()) + "\n"
    elif ".docx" in filename:
        # `filename` is already the full path handed in by the caller.
        with open(filename, "rb") as docx_file:
            result = mammoth.convert_to_html(docx_file)
        text_cont1 = "\n" + result.value + "\n"
    elif ".tile" in filename:
        with open(filename, "r") as source:
            text_cont1 = "\n" + textile.textile(source.read()) + "\n"
    elif ".jade" in filename:
        with open(filename, "r") as source:
            text_cont1 = "\n" + pyjade.simple_convert(source.read()) + "\n"
    elif ".rst" in filename:
        with open(filename, "r") as source:
            text_cont1 = "\n" + \
                publish_parts(source.read(), writer_name='html')[
                    'html_body'] + "\n"
    elif ".html" in filename:
        with open(filename, "r") as source:
            text_cont1 = source.read()
    elif ".txt" in filename:
        with open(filename, "r") as source:
            text_cont1 = source.read()
    else:
        print(filename + " is not a valid file type!")
        text_cont1 = "NULL"
    return text_cont1 + "\n\n"
def build_files(outdir):
    """Build the website into `outdir`.

    Reads config.py from the working directory, renders every file under
    content/ through the templates in templates/, substitutes the template
    variables, then copies the template assets and compiles
    Sass/Less/Stylus/CoffeeScript (with optional CSS/JS minification).

    Bug fixes over the original: the plugin loader tested
    ``sys.version_info[0] < 2`` (never true, leaving ``main`` unbound on
    Python 3), and ``subprocess.call`` was indexed with square brackets
    instead of being called.
    """
    # Make sure there is actually a configuration file.
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    # config.py lives in the working directory, so make it importable.
    sys.path.insert(0, cwd)
    try:
        from config import website_name, website_description, website_language, home_page_list
    except:
        sys.exit(
            "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
    try:
        from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
    except:
        # Older config files may lack the optional values; fall back to
        # harmless defaults and warn instead of failing.
        website_description_long = ""
        website_license = ""
        website_url = ""
        author_name = ""
        author_bio = ""
        plugins = []
        custom_variables = {}
        minify_css = False
        minify_js = False
        print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create (replace) the build folder, refusing dotted traversal names.
    # (The original chained ".", "..", "..." ... checks; "." covers them all.)
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing: either a plain <ul> (no template) or one
    # rendered through templates/page_list_item.html per content file.
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Skip underscore-marked (draft/private) directories.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Render every content file: header + converted content (optionally via a
    # per-page template named on a "+++++" front-matter line) + footer.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                # Re-open header/footer each time: .read() consumes them.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # Front matter: first line names the template, second is +++++.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # read their content into the dict (keyed by filename without .html).
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop! fileinput with inplace=1 redirects stdout into the
                # file, so every print() below writes the new line back.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            # BUG FIX: the original tested `< 2` (never true)
                            # then `< 3`, so on Python 3 `main` was unbound.
                            # Python 3 uses importlib; Python 2 __import__.
                            if sys.version_info[0] >= 3:
                                main = importlib.import_module(plugins[i])
                            else:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root}: "../" repeated for each directory level
                    # below the output root.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile preprocessor sources next to their outputs; files starting
        # with "_" (partials) are skipped.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            try:
                                # BUG FIX: subprocess.call was indexed with
                                # square brackets; it must be called.
                                subprocess.call(["npm", "install", "-g", "stylus"])
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # Python 2 only: reload(sys) re-exposes setdefaultencoding to force the
    # default string encoding to UTF-8. Neither exists on Python 3 (str is
    # already unicode), where the original crashed with a NameError.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Module-level default output directory; the interactive command overwrites
# this so the watchdog Handler knows where to rebuild.
outdir_type = "build"
class Watcher:
    """Watch the specified dirs for changes"""

    # NOTE(review): only content/ is named here, but run() watches both
    # content/ and templates/; this attribute appears unused.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        # watchdog observer thread that dispatches filesystem events.
        self.observer = Observer()

    def run(self):
        """Run the builder on changes"""
        event_handler = Handler()
        threads = []
        paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
        for i in paths:
            targetPath = str(i)
            self.observer.schedule(event_handler, targetPath, recursive=True)
            threads.append(self.observer)
        self.observer.start()
        try:
            # Idle loop; the observer thread does the work in the background.
            while True:
                time.sleep(5)
        except:
            # NOTE(review): bare except — intended to catch Ctrl+C, but it
            # also swallows SystemExit and genuine errors.
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """The handler for the file change watcher"""

    @staticmethod
    def on_any_event(event):
        # Any file create/modify/delete triggers a full rebuild into the
        # module-level outdir_type directory; directory events are ignored.
        global outdir_type
        if event.is_directory:
            return None
        elif event.event_type == 'created':
            # Take any action here when a file is first created.
            build_files(outdir_type)
            print("%s created" % event.src_path)
        elif event.event_type == 'modified':
            # Take any action here when a file is modified.
            build_files(outdir_type)
            print("%s modified" % event.src_path)
        elif event.event_type == 'deleted':
            # Take any action here when a file is deleted.
            build_files(outdir_type)
            print("%s deleted" % event.src_path)
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def view(outdir):
    """Opens the built index.html file in a web browser"""
    # Resolve to an absolute path so the file:// URL is valid.
    index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
    if os.path.exists(index_path):
        webbrowser.open('file://' + index_path)
    else:
        print("The index.html file could not be found in the " + outdir +
              "/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?")
if __name__ == '__main__':
    # Allow running the module directly (e.g. `python -m blended.__main__`).
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | view | python | def view(outdir):
index_path = os.path.realpath(os.path.join(cwd, outdir, "index.html"))
if os.path.exists(index_path):
webbrowser.open('file://' + index_path)
else:
print("The index.html file could not be found in the " + outdir +
"/ folder! Have you deleted it or have you built with home_page_list set to 'no' in config.py?") | Opens the built index.html file in a web browser | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L813-L821 | null | """This is the Blended Static Website Generator"""
# encoding=utf8
import os
import os.path
import sys
from sys import platform
import shutil
import fileinput
import webbrowser
from datetime import datetime
from random import randint
from ftplib import FTP, error_perm
import time
import calendar
import subprocess
import importlib
import click
import pkg_resources
import markdown
import textile
from docutils.core import publish_parts
import mammoth
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sass
import pyjade
import lesscpy
from six import StringIO
from stylus import Stylus
import coffeescript
from jsmin import jsmin
from cssmin import cssmin
import pip
from .functions import create_folder, replace_folder, get_html_filename, get_html_clear_filename, getunzipped, checkConfig, createConfig, createBlendedFolders, parseXML
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()

try:
    # Ask the installed distribution for its version and keep only the
    # leading "X.Y" (first three characters).
    app_version = pkg_resources.require("blended")[0].version
    app_version = app_version[:3]
except:
    # Running from a source checkout without an installed distribution.
    app_version = "NOTSET"
    print("WARNING: app_version not set.\n")
@click.group()
def cli():
    """Blended: Static Website Generator"""
    # Root click command group; subcommands attach via @cli.command(...).
@cli.command('info', short_help='Show info about Blended and the current project.')
def version():
    """Prints info about Blended"""
    # Reports the detected version and the directory commands operate on.
    print("You are running Blended v" + app_version)
    print("The current working directory is " + cwd)
@cli.command('install-template', short_help='Install a Blended template from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_template(username, repo):
    """Installs a Blended template from GitHub"""
    # Downloads the repository as an archive and unpacks it into templates/.
    print("Installing template from " + username + "/" + repo)
    dpath = os.path.join(cwd, "templates")
    getunzipped(username, repo, dpath)
@cli.command('import-wp', short_help='Import a site from WordPress')
@click.option('--filepath', prompt='WordPress export file', help='Which file holds the exported data from WordPress')
def import_wp(filepath):
    """Imports A WordPress export and converts it to a Blended site"""
    print("\nBlended: Static Website Generator -\n")
    checkConfig()
    print("Importing from WordPress...")
    # Parse the WordPress (WXR) XML export; metadata lives under rss.channel.
    wp = parseXML(filepath)
    wname = wp.rss.channel.title.cdata        # site title
    wdesc = wp.rss.channel.description.cdata  # site tagline/description
    wlan = wp.rss.channel.language.cdata      # e.g. "en-US"
    wurl = wp.rss.channel.link.cdata          # original site URL
    aname = wp.rss.channel.wp_author.wp_author_display_name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlan=wlan, wurl=wurl, aname=aname)
    # Write each exported item as an HTML content file (spaces -> underscores).
    for item in wp.rss.channel.item:
        with open(os.path.join(cwd, "content", item.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
            wfile.write(item.content_encoded.cdata.strip())
    print("\nYour website has been imported from WordPress.")
@cli.command('import-blogger', short_help='Import a site from Blogger')
@click.option('--filepath', prompt='Blogger export file', help='Which file holds the exported data from Blogger')
def import_blogger(filepath):
    """Imports A Blogger export and converts it to a Blended site

    Reads a Blogger Atom export via untangle, scaffolds the Blended
    folders/config, and writes every post entry as an HTML content file.
    """
    print("\nBlended: Static Website Generator -\n")
    # Back up any existing config.py before we overwrite it below.
    checkConfig()
    print("Importing from Blogger...")
    blogger = parseXML(filepath)
    wname = blogger.feed.title.cdata
    aname = blogger.feed.author.name.cdata.strip()
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname, aname=aname)
    for entry in blogger.feed.entry:
        # Blogger exports mix posts, comments and settings in one feed;
        # only entries whose atom id contains "post" are actual posts.
        if "post" in entry.id.cdata:
            with open(os.path.join(cwd, "content", entry.title.cdata.replace(" ", "_") + ".html"), 'w') as wfile:
                wfile.write(entry.content.cdata.strip())
    print("\nYour website has been imported from Blogger.")
@cli.command('install-plugin', short_help='Install a Blended plugin from GitHub')
@click.option('--username', prompt='GitHub username/organization',
              help='The GitHub username/organization.')
@click.option('--repo', prompt='GitHub repository',
              help='The GitHub repository name.')
def install_plugin(username, repo):
    """Installs a Blended plugin from GitHub via pip."""
    print("Installing plugin from " + username + "/" + repo)
    # BUG FIX: the old URL used the unauthenticated git:// protocol
    # ("git+git://github.com/..."), which GitHub permanently disabled in
    # January 2022 — every install failed.  Use git+https:// instead.
    # NOTE(review): pip.main() was removed from pip's public API in pip 10;
    # a future change should shell out via
    # subprocess.check_call([sys.executable, "-m", "pip", ...]) instead.
    pip.main(['install', '-U', "git+https://github.com/" +
              username + "/" + repo + ".git"])
@cli.command('init', short_help='Initiate a new website')
def init():
    """Initiates a new website

    Prompts interactively for site metadata, creates the standard
    Blended folder layout, and writes config.py (backing up any
    existing one first via checkConfig()).
    """
    print("Blended: Static Website Generator -\n")
    checkConfig()
    # input() on Python 3, raw_input() on Python 2 — same prompts either way.
    if (sys.version_info > (3, 0)):
        wname = input("Website Name: ")
        wdesc = input("Website Description: ")
        wlan = input("Website Language: ")
        wlic = input("Website License: ")
        aname = input("Author(s) Name(s): ")
    else:
        wname = raw_input("Website Name: ")
        wdesc = raw_input("Website Description: ")
        wlan = raw_input("Website Language: ")
        wlic = raw_input("Website License: ")
        aname = raw_input("Author(s) Name(s): ")
    createBlendedFolders()
    # Populate the configuration file
    createConfig(app_version=app_version, wname=wname,
                 wdesc=wdesc, wlic=wlic, wlan=wlan, aname=aname)
    print("\nThe required files for your website have been generated.")
def placeFiles(ftp, path):
    """Recursively upload every file under *path* to the connected *ftp* server.

    Blended's own source artifacts (config.py/.pyc and the templates/ and
    content/ folders) are skipped.  Directories are created remotely on
    demand ("already exists" 550 replies are ignored) and recursed into.
    """
    for name in os.listdir(path):
        # Never publish the site's sources, only the built output.
        if name in ("config.py", "config.pyc", "templates", "content"):
            continue
        localpath = os.path.join(path, name)
        if os.path.isfile(localpath):
            print("STOR", name, localpath)
            # BUG FIX: the old code passed open(...) directly to storbinary
            # and never closed it, leaking one file handle per upload.
            with open(localpath, 'rb') as fh:
                ftp.storbinary('STOR ' + name, fh)
        elif os.path.isdir(localpath):
            print("MKD", name)
            try:
                ftp.mkd(name)
            # ignore "550 directory already exists"; re-raise anything else
            except error_perm as e:
                if not e.args[0].startswith('550'):
                    raise
            print("CWD", name)
            ftp.cwd(name)
            placeFiles(ftp, localpath)
            print("CWD", "..")
            ftp.cwd("..")
@cli.command('ftp', short_help='Upload the files via ftp')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def send_ftp(outdir):
    """Upload the built website to FTP

    Reads the FTP credentials/target path from config.py in the current
    directory, connects, changes into ftp_upload_path and mirrors the
    local build folder there via placeFiles().
    """
    print("Uploading the files in the " + outdir + "/ directory!\n")
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        # config.py lives in the user's project dir, so it must be importable.
        sys.path.insert(0, cwd)
        try:
            from config import ftp_server, ftp_username, ftp_password, ftp_port, ftp_upload_path
        except:
            sys.exit(
                "The FTP settings could not be found. Maybe your config file is too old. Re-run 'blended init' to fix it.")
        server = ftp_server
        username = ftp_username
        password = ftp_password
        port = ftp_port
        ftp = FTP()
        ftp.connect(server, port)
        ftp.login(username, password)
        filenameCV = os.path.join(cwd, outdir)
        try:
            ftp.cwd(ftp_upload_path)
            placeFiles(ftp, filenameCV)
        # Broad on purpose: any upload failure closes the session and exits.
        except:
            ftp.quit()
            sys.exit("Files not able to be uploaded! Are you sure the directory exists?")
        ftp.quit()
        print("\nFTP Done!")
@cli.command('clean', short_help='Remove the build folder')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def clean_built(outdir):
    """Delete the output directory produced by a previous build, if any."""
    print("Removing the built files!")
    target = os.path.join(cwd, outdir)
    if os.path.exists(target):
        shutil.rmtree(target)
@cli.command('zip', short_help='Package the build folder into a zip file')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
def zip_built(outdir):
    """Packages the build folder into a zip

    The archive is named "<Website_Name>-build-<YYYY-MM-DD>.zip" and is
    written next to the project in the current working directory.
    """
    print("Zipping the built files!")
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        # Locate the build folder and compose the dated archive base name.
        build_dir = os.path.join(cwd, outdir)
        zip_dir = os.path.join(cwd, website_name.replace(" ", "_") + "-build-" +
                               str(datetime.now().date()))
        if os.path.exists(build_dir):
            shutil.make_archive(zip_dir, 'zip', build_dir)
        else:
            print("The " + outdir +
                  "/ folder could not be found! Have you run 'blended build' yet?")
@cli.command('purge', short_help='Purge all the files created by Blended')
def purge():
    """Remove every folder and file Blended generated in this project."""
    print("Purging the Blended files!")
    # Generated directories: templates, content and the default build output.
    for folder in ("templates", "content", "build"):
        folder_path = os.path.join(cwd, folder)
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    # Generated configuration files, including the compiled and backup copies.
    for name in ("config.py", "config.pyc", "config.py.oldbak"):
        file_path = os.path.join(cwd, name)
        if os.path.exists(file_path):
            os.remove(file_path)
def convert_text(filename):
    """Convert the content file at *filename* into an HTML fragment.

    Dispatches on the file extension (case-insensitively, a mild
    generalization of the old substring checks): Markdown, Textile, Jade,
    reStructuredText and .docx are converted to HTML; .html and .txt are
    passed through verbatim.  The result always ends with two newlines.
    Unsupported extensions print a warning and yield the literal "NULL",
    matching the original fallback behavior.
    """
    ext = os.path.splitext(filename)[1].lower()
    if ext == ".docx":
        # BUG FIX: the old code re-joined the (already full) path with
        # cwd/content and also leaked a spurious text-mode handle; mammoth
        # just needs the raw bytes of the .docx archive.
        with open(filename, "rb") as docx_file:
            html = "\n" + mammoth.convert_to_html(docx_file).value + "\n"
    elif ext in (".html", ".txt"):
        # Pass-through formats: no conversion, no added newlines.
        with open(filename, "r") as text_file:
            html = text_file.read()
    elif ext == ".md":
        with open(filename, "r") as text_file:
            html = "\n" + markdown.markdown(text_file.read()) + "\n"
    elif ext == ".tile":
        with open(filename, "r") as text_file:
            html = "\n" + textile.textile(text_file.read()) + "\n"
    elif ext == ".jade":
        with open(filename, "r") as text_file:
            html = "\n" + pyjade.simple_convert(text_file.read()) + "\n"
    elif ext == ".rst":
        with open(filename, "r") as text_file:
            html = "\n" + \
                publish_parts(text_file.read(), writer_name='html')[
                    'html_body'] + "\n"
    else:
        print(filename + " is not a valid file type!")
        html = "NULL"
    return html + "\n\n"
def build_files(outdir):
    """Build the whole site into *outdir*.

    Pipeline: load config.py from the current directory, recreate the
    output folder, render a page listing, write every content file
    through the header/footer (and optional per-page) templates, do an
    in-place pass over the generated HTML substituting the
    ``{placeholder}`` variables, then copy/compile the template assets
    (Sass/Less/Stylus/CoffeeScript, optional CSS/JS minification).
    """
    # Make sure there is actually a configuration file
    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        # Crucial values abort the build; optional values fall back to
        # defaults with a warning (broad excepts are deliberate here —
        # any import failure means the config is unusable/old).
        try:
            from config import website_name, website_description, website_language, home_page_list
        except:
            sys.exit(
                "ERROR: Some of the crucial configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")
        try:
            from config import website_description_long, website_license, website_url, author_name, author_bio, plugins, minify_css, minify_js, custom_variables
        except:
            website_description_long = ""
            website_license = ""
            website_url = ""
            author_name = ""
            author_bio = ""
            plugins = []
            custom_variables = {}
            minify_css = False
            minify_js = False
            print("WARNING: Some of the optional configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\n")
    # Create the build folder (skip wiping when outdir contains dots, to
    # avoid rmtree-ing "." / relative parents).
    build_dir = os.path.join(cwd, outdir)
    if "." not in outdir and ".." not in outdir and "..." not in outdir and "...." not in outdir and "....." not in outdir:
        replace_folder(build_dir)
    # Make sure there is actually a header template file
    header_file_dir = os.path.join(cwd, "templates", "header.html")
    if not os.path.exists(header_file_dir):
        sys.exit(
            "There dosen't seem to be a header template file. You need one to generate.")
    # Make sure there is actually a footer template file
    footer_file_dir = os.path.join(cwd, "templates", "footer.html")
    if not os.path.exists(footer_file_dir):
        sys.exit(
            "There dosen't seem to be a footer template file. You need one to generate.")
    # Open the header and footer files for reading.  NOTE: read() exhausts
    # these handles, which is why the per-page loop below reopens them.
    header_file = open(header_file_dir, "r")
    footer_file = open(footer_file_dir, "r")
    # Create the HTML page listing: either a default <ul>, or one built
    # from the user's page_list_item.html template if present.
    page_list_item_file = os.path.join(cwd, "templates", "page_list_item.html")
    if not os.path.exists(page_list_item_file):
        page_list = '<ul class="page-list">\n'
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            for filename in files:
                # Derive the content subfolder (if any) for the link path.
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + '<li class="page-list-item"><a href="' + subfolder_link + newFilename + \
                    '">' + newFilename2 + '</a><span class="page-list-item-time"> - ' + \
                    str(file_modified) + '</span></li>\n'
        page_list = page_list + '</ul>'
    else:
        with open(page_list_item_file, 'r') as f:
            page_list_item = f.read()
        page_list = ""
        for root, dirs, files in os.walk(os.path.join(cwd, "content")):
            # Folders containing "_" are treated as private and skipped.
            dirs[:] = [d for d in dirs if "_" not in d]
            for filename in files:
                p_content = convert_text(os.path.join(root, filename))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_link = ""
                else:
                    subfolder_link = subfolder + "/"
                # Split the ctime string back apart for the {day}/{month}/
                # {year} template placeholders.
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                newFilename = get_html_filename(filename)
                newFilename2 = get_html_clear_filename(filename)
                page_list = page_list + page_list_item.replace("{path}", subfolder_link + newFilename).replace("{name}", newFilename2).replace(
                    "{date}", str(file_modified)).replace("{content}", p_content).replace("{content_short}", p_content[:250] + "...").replace("{day}", file_modified_day).replace("{month}", file_modified_month).replace("{month_name}", month_name).replace("{year}", file_modified_year)
    if home_page_list == "yes" or home_page_list:
        # Open the home page file (index.html) for writing
        home_working_file = open(os.path.join(cwd, outdir, "index.html"), "w")
        home_working_file.write(header_file.read())
        # Make sure there is actually a home page template file
        home_templ_dir = os.path.join(cwd, "templates", "home_page.html")
        if os.path.exists(home_templ_dir):
            home_templ_file = open(home_templ_dir, "r")
            home_working_file.write(home_templ_file.read())
        else:
            print("\nNo home page template file found. Writing page list to index.html")
            home_working_file.write(page_list)
        home_working_file.write(footer_file.read())
        home_working_file.close()
    # Render every (non-private) content file to the output folder.
    for root, dirs, files in os.walk(os.path.join(cwd, "content")):
        dirs[:] = [d for d in dirs if "_" not in d]
        for filename in files:
            if not filename.startswith("_"):
                # Reopen header/footer: the previous read() left them at EOF.
                header_file = open(header_file_dir, "r")
                footer_file = open(footer_file_dir, "r")
                newFilename = get_html_filename(filename)
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, "content"), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    currents_working_file = open(
                        os.path.join(cwd, outdir, newFilename), "w")
                else:
                    create_folder(os.path.join(cwd, outdir, subfolder))
                    currents_working_file = open(os.path.join(
                        cwd, outdir, subfolder, newFilename), "w")
                # Write the header
                currents_working_file.write(header_file.read())
                text_cont1 = convert_text(os.path.join(root, filename))
                # A "+++++" marker on the second line means line 1 names a
                # per-page template; both marker lines are stripped out.
                if "+++++" in text_cont1.splitlines()[1]:
                    page_template_file = text_cont1.splitlines()[0]
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[0], "")
                    text_cont1 = text_cont1.replace(
                        text_cont1.splitlines()[1], "")
                else:
                    page_template_file = "content_page"
                # Write the text content into the content template and onto the
                # build file
                content_templ_dir = os.path.join(
                    cwd, "templates", page_template_file + ".html")
                if os.path.exists(content_templ_dir):
                    content_templ_file = open(content_templ_dir, "r")
                    content_templ_file1 = content_templ_file.read()
                    content_templ_file2 = content_templ_file1.replace(
                        "{page_content}", text_cont1)
                    currents_working_file.write(content_templ_file2)
                else:
                    currents_working_file.write(text_cont1)
                # Write the footer to the build file
                currents_working_file.write("\n" + footer_file.read())
                # Close the build file
                currents_working_file.close()
    # Find all the nav(something) templates in the `templates` folder and
    # Read their content to the dict
    navs = {}
    for file in os.listdir(os.path.join(cwd, "templates")):
        if "nav" in file:
            nav_cont = open(os.path.join(cwd, "templates", file), "r")
            navs[file.replace(".html", "")] = nav_cont.read()
            nav_cont.close()
    forbidden_dirs = set(["assets", "templates"])
    blended_version_message = "Built with Blended v" + \
        str(app_version)
    build_date = str(datetime.now().date())
    build_time = str(datetime.now().time())
    build_datetime = str(datetime.now())
    # Replace global variables such as site name and language
    for root, dirs, files in os.walk(os.path.join(cwd, outdir)):
        dirs[:] = [d for d in dirs if d not in forbidden_dirs]
        for filename in files:
            if filename != "config.pyc" and filename != "config.py":
                newFilename = get_html_clear_filename(filename)
                page_file = filename.replace(".html", "")
                page_folder = os.path.basename(os.path.dirname(os.path.join(
                    root, filename))).replace("-", "").replace("_", "").title()
                page_folder_orig = os.path.basename(
                    os.path.dirname(os.path.join(root, filename)))
                top = os.path.dirname(os.path.join(root, filename))
                top2 = top.replace(os.path.join(cwd, outdir), "", 1)
                if platform != "win32":
                    subfolder = top2.replace("/", "", 1)
                else:
                    subfolder = top2.replace("\\", "", 1)
                if subfolder == "":
                    subfolder_folder = os.path.join(cwd, outdir, filename)
                else:
                    subfolder_folder = os.path.join(
                        cwd, outdir, subfolder, filename)
                file_modified = time.ctime(
                    os.path.getmtime(os.path.join(root, filename)))
                file_modified_day = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[8:10]
                file_modified_year = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[:4]
                file_modified_month = str(datetime.strptime(
                    file_modified, "%a %b %d %H:%M:%S %Y"))[5:7]
                month_name = calendar.month_name[int(file_modified_month)]
                # The Loop! fileinput with inplace=1 redirects print() back
                # into the file, so each printed line rewrites the page.
                for line in fileinput.input(subfolder_folder, inplace=1):
                    for var in custom_variables:
                        line = line.replace(
                            "{" + var + "}", custom_variables[var])
                    # NOTE(review): `sys.version_info[0] < 2` is never true,
                    # so on Python 3 neither branch assigns `main` and any
                    # configured plugin raises NameError here — likely meant
                    # `< 3` / `else` for the py2/py3 split.  TODO confirm.
                    if len(plugins) != 0:
                        for i in range(len(plugins)):
                            if sys.version_info[0] < 2:
                                main = importlib.import_module(plugins[i])
                            elif sys.version_info[0] < 3:
                                main = __import__(plugins[i])
                            content = main.main()
                            line = line.replace(
                                "{" + plugins[i] + "}", content)
                    if "{nav" in line:
                        navname = line.split("{")[1].split("}")[0]
                        line = line.replace(
                            "{" + navname + "}", navs[(line.split("{"))[1].split("}")[0]])
                    line = line.replace(
                        "{website_description}", website_description)
                    line = line.replace(
                        "{website_description_long}", website_description_long)
                    line = line.replace("{website_license}", website_license)
                    line = line.replace("{website_language}", website_language)
                    line = line.replace("{website_url}", website_url)
                    line = line.replace("{author_name}", author_name)
                    line = line.replace("{author_bio}", author_bio)
                    line = line.replace("{random_number}",
                                        str(randint(0, 100000000)))
                    line = line.replace("{build_date}", build_date)
                    line = line.replace("{build_time}", build_time)
                    line = line.replace("{build_datetime}", build_datetime)
                    line = line.replace("{page_list}", page_list)
                    line = line.replace("{page_name}", newFilename)
                    line = line.replace("{page_filename}", page_file)
                    line = line.replace("{page_file}", filename)
                    line = line.replace("{" + filename + "_active}", "active")
                    if page_folder != outdir.title():
                        line = line.replace("{page_folder}", page_folder)
                    else:
                        line = line.replace("{page_folder}", "")
                    if page_folder_orig != outdir:
                        line = line.replace(
                            "{page_folder_orig}", page_folder_orig)
                    else:
                        line = line.replace("{page_folder_orig}", "")
                    line = line.replace("{page_date}", str(file_modified))
                    line = line.replace("{page_day}", str(file_modified_day))
                    line = line.replace("{page_year}", str(file_modified_year))
                    line = line.replace(
                        "{page_month}", str(file_modified_month))
                    line = line.replace(
                        "{page_month_name}", str(month_name))
                    line = line.replace("{blended_version}", str(app_version))
                    line = line.replace(
                        "{blended_version_message}", blended_version_message)
                    line = line.replace("{website_name}", website_name)
                    # {relative_root} becomes "../" per directory level below
                    # the output root, so asset links work from subfolders.
                    top = os.path.join(cwd, outdir)
                    startinglevel = top.count(os.sep)
                    relative_path = ""
                    level = root.count(os.sep) - startinglevel
                    for i in range(level):
                        relative_path = relative_path + "../"
                    line = line.replace("{relative_root}", relative_path)
                    print(line.rstrip('\n'))
                fileinput.close()
    # Copy the asset folder to the build folder
    if os.path.exists(os.path.join(cwd, "templates", "assets")):
        if os.path.exists(os.path.join(cwd, outdir, "assets")):
            shutil.rmtree(os.path.join(cwd, outdir, "assets"))
        shutil.copytree(os.path.join(cwd, "templates", "assets"),
                        os.path.join(cwd, outdir, "assets"))
        # Compile preprocessor assets in place; "_"-prefixed files (partials)
        # are skipped.
        for root, dirs, files in os.walk(os.path.join(cwd, outdir, "assets")):
            for file in files:
                if not file.startswith("_"):
                    if (file.endswith(".sass")) or (file.endswith(".scss")):
                        sass_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if sass_text != "":
                            text_file.write(sass.compile(string=sass_text))
                        else:
                            print(file + " is empty! Not compiling Sass.")
                        text_file.close()
                    if file.endswith(".less"):
                        less_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(
                            root, file[:-4] + "css"), "w")
                        if less_text != "":
                            text_file.write(lesscpy.compile(StringIO(less_text)))
                        else:
                            print(file + " is empty! Not compiling Less.")
                        text_file.close()
                    if file.endswith(".styl"):
                        try:
                            styl_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(
                                root, file[:-4] + "css"), "w")
                            if styl_text != "":
                                text_file.write(Stylus().compile(styl_text))
                            else:
                                print(file + " is empty! Not compiling Styl.")
                            text_file.close()
                        except:
                            print("Not able to build with Stylus! Is it installed?")
                            # NOTE(review): subprocess.call["npm", ...] indexes
                            # the function object instead of calling it — this
                            # always raises TypeError and the except below
                            # masks it; likely meant subprocess.call([...]).
                            try:
                                subprocess.call["npm", "install", "-g", "stylus"]
                            except:
                                print("NPM (NodeJS) not working. Is it installed?")
                    if file.endswith(".coffee"):
                        coffee_text = open(os.path.join(root, file)).read()
                        text_file = open(os.path.join(root, file[:-6] + "js"), "w")
                        if coffee_text != "":
                            text_file.write(coffeescript.compile(coffee_text))
                        else:
                            print(file + " is empty! Not compiling CoffeeScript.")
                        text_file.close()
                    if minify_css:
                        if file.endswith(".css"):
                            css_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if css_text != "":
                                text_file.write(cssmin(css_text))
                            text_file.close()
                    if minify_js:
                        if file.endswith(".js"):
                            js_text = open(os.path.join(root, file)).read()
                            text_file = open(os.path.join(root, file), "w")
                            if js_text != "":
                                text_file.write(jsmin(js_text))
                            text_file.close()
@cli.command('build', short_help='Build the Blended files into a website')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def build(outdir):
    """Blends the generated files and outputs a HTML website"""
    print("Building your Blended files into a website!")
    # BUG FIX: `reload(sys)` / `sys.setdefaultencoding` exist only on
    # Python 2 (they force implicit str<->unicode coercions to UTF-8);
    # calling them unconditionally crashed with NameError on Python 3,
    # where no such hack is needed.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("The files are built! You can find them in the " + outdir +
          "/ directory. Run the view command to see what you have created in a web browser.")
# Module-level default output directory used by the watchdog Handler
# callbacks; the `interactive` command overwrites it via `global outdir_type`.
outdir_type = "build"
class Watcher:
    """Watch the content/ and templates/ folders and rebuild on change."""

    # Kept for backwards compatibility; run() watches both content/ and
    # templates/ explicitly.
    DIRECTORY_TO_WATCH = os.path.join(cwd, "content")

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Run the builder on changes until interrupted with CTRL+C."""
        event_handler = Handler()
        for path in (os.path.join(cwd, "content"), os.path.join(cwd, "templates")):
            self.observer.schedule(event_handler, str(path), recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        # BUG FIX: was a bare `except:` that also swallowed real errors in
        # the wait loop; CTRL+C is the only intended way to stop.
        except KeyboardInterrupt:
            self.observer.stop()
            print("\nObserver stopped.")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Watchdog callback: rebuild the whole site on any file change."""

    @staticmethod
    def on_any_event(event):
        global outdir_type
        # Directory events carry no content change we care about.
        if event.is_directory:
            return None
        action = event.event_type
        if action in ('created', 'modified', 'deleted'):
            # Any create/modify/delete triggers a full rebuild, then the
            # event is reported on stdout.
            build_files(outdir_type)
            print("%s %s" % (event.src_path, action))
@cli.command('interactive', short_help='Build the Blended files into a website on each file change')
@click.option('--outdir', default="build", help='Choose which folder to build to. Default is `build`.')
def interactive(outdir):
    """Blends the generated files and outputs a HTML website on file change"""
    print("Building your Blended files into a website!")
    # Publish the chosen output dir to the watchdog Handler callbacks.
    global outdir_type
    outdir_type = outdir
    # BUG FIX: the Python 2-only encoding hack crashed on Python 3
    # (`reload` is not a builtin there); guard it by interpreter version.
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
@cli.command('view', short_help='View the finished Blended website')
@click.option('--outdir', default="build", help='Choose which folder the built files are in. Default is `build`.')
# Script entry point: dispatch straight to the click command group.
if __name__ == '__main__':
    cli()
|
BlendedSiteGenerator/Blended | blended/__main__.py | Watcher.run | python | def run(self):
event_handler = Handler()
threads = []
paths = [os.path.join(cwd, "content"), os.path.join(cwd, "templates")]
for i in paths:
targetPath = str(i)
self.observer.schedule(event_handler, targetPath, recursive=True)
threads.append(self.observer)
self.observer.start()
try:
while True:
time.sleep(5)
except:
self.observer.stop()
print("\nObserver stopped.")
self.observer.join() | Run the builder on changes | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L742-L762 | null | class Watcher:
"""Watch the specified dirs for changes"""
DIRECTORY_TO_WATCH = os.path.join(cwd, "content")
def __init__(self):
self.observer = Observer()
|
BlendedSiteGenerator/Blended | blended/functions.py | replace_folder | python | def replace_folder(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path) | If the specified folder exists, it is deleted and recreated | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L24-L30 | null | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
    """Create *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def get_html_filename(filename):
    """Return *filename* with its source extension swapped for ``.html``.

    Recognised source extensions (checked as substrings, preserving the
    historical matching order): .md, .tile, .jade, .txt, .rst, .docx.
    A name already containing ".html" is returned unchanged.

    BUG FIX: an unrecognised name used to print a warning and then crash
    with UnboundLocalError on the return; it now prints the same warning
    and returns the name unchanged.
    """
    if ".html" in filename:
        return filename
    for ext in (".md", ".tile", ".jade", ".txt", ".rst", ".docx"):
        if ext in filename:
            return filename.replace(ext, ".html")
    print(filename + " is not a valid file type!")
    return filename
def get_html_clear_filename(filename):
    """Turn a content filename into a human-readable, title-cased label.

    Strips the known source extensions, maps "index" to "home", converts
    dashes/underscores to spaces and title-cases the result.
    """
    name = filename
    for ext in (".html", ".md", ".txt", ".tile", ".jade", ".rst", ".docx"):
        name = name.replace(ext, "")
    # The site's front page file is called index.*, but reads as "Home".
    name = name.replace("index", "home")
    for sep in ("-", "_"):
        name = name.replace(sep, " ")
    return name.title()
def getunzipped(username, repo, thedir):
    """Download a GitHub repo's master-branch zip and unpack it into *thedir*.

    On download or unzip failure a message is printed and the function
    returns without raising.  The "<repo>-master" wrapper folder GitHub
    puts in the archive is flattened into *thedir* and removed.
    """
    theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
    name = os.path.join(thedir, 'temp.zip')
    try:
        # NOTE(review): urllib.urlretrieve is Python 2-only; on Python 3
        # this is urllib.request.urlretrieve — confirm the supported
        # interpreter before relying on this path.
        name = urllib.urlretrieve(theurl, name)
        # urlretrieve returns a (filename, headers) tuple, so rebuild the
        # plain path for the ZipFile call below.
        name = os.path.join(thedir, 'temp.zip')
    except IOError as e:
        print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
        return
    try:
        z = zipfile.ZipFile(name)
    except zipfile.error as e:
        print("Bad zipfile (from %r): %s" % (theurl, e))
        return
    z.extractall(thedir)
    z.close()
    os.remove(name)
    # Flatten GitHub's "<repo>-master/" wrapper directory into thedir.
    copy_tree(os.path.join(thedir, repo + "-master"), thedir)
    shutil.rmtree(os.path.join(thedir, repo + "-master"))
def checkConfig():
    """If the config.py file exists, back it up as config.py.oldbak."""
    config_path = os.path.join(cwd, "config.py")
    if not os.path.exists(config_path):
        return
    print("Making a backup of your config file!")
    copyfile(config_path, os.path.join(cwd, "config.py.oldbak"))
def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
    """Write a fresh config.py into the current working directory.

    All website metadata arguments default to empty strings; missing
    values simply produce empty config entries that the user can edit.
    """
    config_file_dir = os.path.join(cwd, "config.py")
    # `with` guarantees the file is flushed and closed even if a write fails.
    with open(config_file_dir, "w") as config_file:
        # BUG FIX: app_version may be a number (the default is 5.0); the old
        # code concatenated it to a str directly and raised TypeError.
        config_file.write('blended_version = ' + str(app_version) + '\n')
        config_file.write('\n')
        config_file.write(
            '# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
        config_file.write('\n')
        config_file.write('website_name = "' + wname + '"\n')
        config_file.write('website_description = "' + wdesc + '"\n')
        config_file.write(
            'website_description_long = "' + wdescl + '"\n')
        config_file.write('website_license = "' + wlic + '"\n')
        config_file.write('website_language = "' + wlan + '"\n')
        config_file.write('website_url = "' + wurl + '"\n')
        config_file.write('\n')
        config_file.write('author_name = "' + aname + '"\n')
        config_file.write('author_bio = "' + abio + '"\n')
        config_file.write('\n')
        config_file.write('home_page_list = True\n')
        config_file.write('\n')
        config_file.write('plugins = [] # Place all needed plugins in here\n')
        config_file.write(
            'custom_variables = {} # Place all custom variables in here\n')
        config_file.write('\n')
        config_file.write('minify_css = False\n')
        config_file.write('minify_js = False\n')
        config_file.write('\n')
        config_file.write('# The following values are used for FTP uploads')
        config_file.write('\n')
        config_file.write('ftp_server = "localhost"\n')
        config_file.write('ftp_username = "user"\n')
        config_file.write('ftp_password = "pass"\n')
        config_file.write('ftp_port = 21\n')
        config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
def createBlendedFolders():
    """Create the standard folder layout for a Blended website."""
    assets = os.path.join(cwd, "templates", "assets")
    # templates/ with its asset subfolders, plus the content/ source folder.
    for folder in (
            os.path.join(cwd, "templates"),
            assets,
            os.path.join(assets, "css"),
            os.path.join(assets, "js"),
            os.path.join(assets, "img"),
            os.path.join(cwd, "content")):
        create_folder(folder)
def parseXML(filepath):
    """Parse the XML export at *filepath* with untangle and return the tree.

    Exits the program with a message if the file cannot be read/parsed.
    """
    try:
        output = untangle.parse(filepath)
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt raised while parsing.
    except Exception:
        sys.exit("The export file could not be found or read!")
    return output
|
BlendedSiteGenerator/Blended | blended/functions.py | get_html_filename | python | def get_html_filename(filename):
if ".html" in filename:
newFilename = filename
elif ".md" in filename:
newFilename = filename.replace(".md", ".html")
elif ".tile" in filename:
newFilename = filename.replace(".tile", ".html")
elif ".jade" in filename:
newFilename = filename.replace(".jade", ".html")
elif ".txt" in filename:
newFilename = filename.replace(".txt", ".html")
elif ".rst" in filename:
newFilename = filename.replace(".rst", ".html")
elif ".docx" in filename:
newFilename = filename.replace(".docx", ".html")
else:
print(filename + " is not a valid file type!")
return newFilename | Converts the filename to a .html extension | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L33-L52 | null | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
"""Creates the specified folder if it dosen't already exist"""
if not os.path.exists(path):
os.makedirs(path)
def replace_folder(path):
    """Delete *path* if it exists, then (re)create it empty."""
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
def get_html_clear_filename(filename):
"""Clears the file extension from the filename and makes it nice looking"""
newFilename = filename.replace(".html", "")
newFilename = newFilename.replace(".md", "")
newFilename = newFilename.replace(".txt", "")
newFilename = newFilename.replace(".tile", "")
newFilename = newFilename.replace(".jade", "")
newFilename = newFilename.replace(".rst", "")
newFilename = newFilename.replace(".docx", "")
newFilename = newFilename.replace("index", "home")
newFilename = newFilename.replace("-", " ")
newFilename = newFilename.replace("_", " ")
newFilename = newFilename.title()
return newFilename
def getunzipped(username, repo, thedir):
"""Downloads and unzips a zip file"""
theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
name = os.path.join(thedir, 'temp.zip')
try:
name = urllib.urlretrieve(theurl, name)
name = os.path.join(thedir, 'temp.zip')
except IOError as e:
print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
return
try:
z = zipfile.ZipFile(name)
except zipfile.error as e:
print("Bad zipfile (from %r): %s" % (theurl, e))
return
z.extractall(thedir)
z.close()
os.remove(name)
copy_tree(os.path.join(thedir, repo + "-master"), thedir)
shutil.rmtree(os.path.join(thedir, repo + "-master"))
def checkConfig():
"""If the config.py file exists, back it up"""
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
print("Making a backup of your config file!")
config_file_dir2 = os.path.join(cwd, "config.py.oldbak")
copyfile(config_file_dir, config_file_dir2)
def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
"""Generates a config file from the information"""
config_file_dir = os.path.join(cwd, "config.py")
config_file = open(config_file_dir, "w")
config_file.write('blended_version = ' + app_version + '\n')
config_file.write('\n')
config_file.write(
'# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
config_file.write('\n')
config_file.write('website_name = "' + wname + '"\n')
config_file.write('website_description = "' + wdesc + '"\n')
config_file.write(
'website_description_long = "' + wdescl + '"\n')
config_file.write('website_license = "' + wlic + '"\n')
config_file.write('website_language = "' + wlan + '"\n')
config_file.write('website_url = "' + wurl + '"\n')
config_file.write('\n')
config_file.write('author_name = "' + aname + '"\n')
config_file.write('author_bio = "' + abio + '"\n')
config_file.write('\n')
config_file.write('home_page_list = True\n')
config_file.write('\n')
config_file.write('plugins = [] # Place all needed plugins in here\n')
config_file.write(
'custom_variables = {} # Place all custom variables in here\n')
config_file.write('\n')
config_file.write('minify_css = False\n')
config_file.write('minify_js = False\n')
config_file.write('\n')
config_file.write('# The following values are used for FTP uploads')
config_file.write('\n')
config_file.write('ftp_server = "localhost"\n')
config_file.write('ftp_username = "user"\n')
config_file.write('ftp_password = "pass"\n')
config_file.write('ftp_port = 21\n')
config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
config_file.close()
def createBlendedFolders():
"""Creates the standard folders for a Blended website"""
# Create the templates folder
create_folder(os.path.join(cwd, "templates"))
# Create the templates/assets folder
create_folder(os.path.join(cwd, "templates", "assets"))
# Create the templates/assets/css folder
create_folder(os.path.join(cwd, "templates", "assets", "css"))
# Create the templates/assets/js folder
create_folder(os.path.join(cwd, "templates", "assets", "js"))
# Create the templates/assets/img folder
create_folder(os.path.join(cwd, "templates", "assets", "img"))
# Create the content folder
create_folder(os.path.join(cwd, "content"))
def parseXML(filepath):
"""Parses an XML file using untangle"""
try:
output = untangle.parse(filepath)
except:
sys.exit("The export file could not be found or read!")
return output
|
BlendedSiteGenerator/Blended | blended/functions.py | get_html_clear_filename | python | def get_html_clear_filename(filename):
newFilename = filename.replace(".html", "")
newFilename = newFilename.replace(".md", "")
newFilename = newFilename.replace(".txt", "")
newFilename = newFilename.replace(".tile", "")
newFilename = newFilename.replace(".jade", "")
newFilename = newFilename.replace(".rst", "")
newFilename = newFilename.replace(".docx", "")
newFilename = newFilename.replace("index", "home")
newFilename = newFilename.replace("-", " ")
newFilename = newFilename.replace("_", " ")
newFilename = newFilename.title()
return newFilename | Clears the file extension from the filename and makes it nice looking | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L55-L69 | null | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
"""Creates the specified folder if it dosen't already exist"""
if not os.path.exists(path):
os.makedirs(path)
def replace_folder(path):
"""If the specified folder exists, it is deleted and recreated"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def get_html_filename(filename):
"""Converts the filename to a .html extension"""
if ".html" in filename:
newFilename = filename
elif ".md" in filename:
newFilename = filename.replace(".md", ".html")
elif ".tile" in filename:
newFilename = filename.replace(".tile", ".html")
elif ".jade" in filename:
newFilename = filename.replace(".jade", ".html")
elif ".txt" in filename:
newFilename = filename.replace(".txt", ".html")
elif ".rst" in filename:
newFilename = filename.replace(".rst", ".html")
elif ".docx" in filename:
newFilename = filename.replace(".docx", ".html")
else:
print(filename + " is not a valid file type!")
return newFilename
def getunzipped(username, repo, thedir):
"""Downloads and unzips a zip file"""
theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
name = os.path.join(thedir, 'temp.zip')
try:
name = urllib.urlretrieve(theurl, name)
name = os.path.join(thedir, 'temp.zip')
except IOError as e:
print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
return
try:
z = zipfile.ZipFile(name)
except zipfile.error as e:
print("Bad zipfile (from %r): %s" % (theurl, e))
return
z.extractall(thedir)
z.close()
os.remove(name)
copy_tree(os.path.join(thedir, repo + "-master"), thedir)
shutil.rmtree(os.path.join(thedir, repo + "-master"))
def checkConfig():
"""If the config.py file exists, back it up"""
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
print("Making a backup of your config file!")
config_file_dir2 = os.path.join(cwd, "config.py.oldbak")
copyfile(config_file_dir, config_file_dir2)
def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
"""Generates a config file from the information"""
config_file_dir = os.path.join(cwd, "config.py")
config_file = open(config_file_dir, "w")
config_file.write('blended_version = ' + app_version + '\n')
config_file.write('\n')
config_file.write(
'# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
config_file.write('\n')
config_file.write('website_name = "' + wname + '"\n')
config_file.write('website_description = "' + wdesc + '"\n')
config_file.write(
'website_description_long = "' + wdescl + '"\n')
config_file.write('website_license = "' + wlic + '"\n')
config_file.write('website_language = "' + wlan + '"\n')
config_file.write('website_url = "' + wurl + '"\n')
config_file.write('\n')
config_file.write('author_name = "' + aname + '"\n')
config_file.write('author_bio = "' + abio + '"\n')
config_file.write('\n')
config_file.write('home_page_list = True\n')
config_file.write('\n')
config_file.write('plugins = [] # Place all needed plugins in here\n')
config_file.write(
'custom_variables = {} # Place all custom variables in here\n')
config_file.write('\n')
config_file.write('minify_css = False\n')
config_file.write('minify_js = False\n')
config_file.write('\n')
config_file.write('# The following values are used for FTP uploads')
config_file.write('\n')
config_file.write('ftp_server = "localhost"\n')
config_file.write('ftp_username = "user"\n')
config_file.write('ftp_password = "pass"\n')
config_file.write('ftp_port = 21\n')
config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
config_file.close()
def createBlendedFolders():
"""Creates the standard folders for a Blended website"""
# Create the templates folder
create_folder(os.path.join(cwd, "templates"))
# Create the templates/assets folder
create_folder(os.path.join(cwd, "templates", "assets"))
# Create the templates/assets/css folder
create_folder(os.path.join(cwd, "templates", "assets", "css"))
# Create the templates/assets/js folder
create_folder(os.path.join(cwd, "templates", "assets", "js"))
# Create the templates/assets/img folder
create_folder(os.path.join(cwd, "templates", "assets", "img"))
# Create the content folder
create_folder(os.path.join(cwd, "content"))
def parseXML(filepath):
"""Parses an XML file using untangle"""
try:
output = untangle.parse(filepath)
except:
sys.exit("The export file could not be found or read!")
return output
|
BlendedSiteGenerator/Blended | blended/functions.py | getunzipped | python | def getunzipped(username, repo, thedir):
theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
name = os.path.join(thedir, 'temp.zip')
try:
name = urllib.urlretrieve(theurl, name)
name = os.path.join(thedir, 'temp.zip')
except IOError as e:
print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
return
try:
z = zipfile.ZipFile(name)
except zipfile.error as e:
print("Bad zipfile (from %r): %s" % (theurl, e))
return
z.extractall(thedir)
z.close()
os.remove(name)
copy_tree(os.path.join(thedir, repo + "-master"), thedir)
shutil.rmtree(os.path.join(thedir, repo + "-master")) | Downloads and unzips a zip file | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L72-L92 | null | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
"""Creates the specified folder if it dosen't already exist"""
if not os.path.exists(path):
os.makedirs(path)
def replace_folder(path):
"""If the specified folder exists, it is deleted and recreated"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def get_html_filename(filename):
"""Converts the filename to a .html extension"""
if ".html" in filename:
newFilename = filename
elif ".md" in filename:
newFilename = filename.replace(".md", ".html")
elif ".tile" in filename:
newFilename = filename.replace(".tile", ".html")
elif ".jade" in filename:
newFilename = filename.replace(".jade", ".html")
elif ".txt" in filename:
newFilename = filename.replace(".txt", ".html")
elif ".rst" in filename:
newFilename = filename.replace(".rst", ".html")
elif ".docx" in filename:
newFilename = filename.replace(".docx", ".html")
else:
print(filename + " is not a valid file type!")
return newFilename
def get_html_clear_filename(filename):
"""Clears the file extension from the filename and makes it nice looking"""
newFilename = filename.replace(".html", "")
newFilename = newFilename.replace(".md", "")
newFilename = newFilename.replace(".txt", "")
newFilename = newFilename.replace(".tile", "")
newFilename = newFilename.replace(".jade", "")
newFilename = newFilename.replace(".rst", "")
newFilename = newFilename.replace(".docx", "")
newFilename = newFilename.replace("index", "home")
newFilename = newFilename.replace("-", " ")
newFilename = newFilename.replace("_", " ")
newFilename = newFilename.title()
return newFilename
def checkConfig():
"""If the config.py file exists, back it up"""
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
print("Making a backup of your config file!")
config_file_dir2 = os.path.join(cwd, "config.py.oldbak")
copyfile(config_file_dir, config_file_dir2)
def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
"""Generates a config file from the information"""
config_file_dir = os.path.join(cwd, "config.py")
config_file = open(config_file_dir, "w")
config_file.write('blended_version = ' + app_version + '\n')
config_file.write('\n')
config_file.write(
'# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
config_file.write('\n')
config_file.write('website_name = "' + wname + '"\n')
config_file.write('website_description = "' + wdesc + '"\n')
config_file.write(
'website_description_long = "' + wdescl + '"\n')
config_file.write('website_license = "' + wlic + '"\n')
config_file.write('website_language = "' + wlan + '"\n')
config_file.write('website_url = "' + wurl + '"\n')
config_file.write('\n')
config_file.write('author_name = "' + aname + '"\n')
config_file.write('author_bio = "' + abio + '"\n')
config_file.write('\n')
config_file.write('home_page_list = True\n')
config_file.write('\n')
config_file.write('plugins = [] # Place all needed plugins in here\n')
config_file.write(
'custom_variables = {} # Place all custom variables in here\n')
config_file.write('\n')
config_file.write('minify_css = False\n')
config_file.write('minify_js = False\n')
config_file.write('\n')
config_file.write('# The following values are used for FTP uploads')
config_file.write('\n')
config_file.write('ftp_server = "localhost"\n')
config_file.write('ftp_username = "user"\n')
config_file.write('ftp_password = "pass"\n')
config_file.write('ftp_port = 21\n')
config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
config_file.close()
def createBlendedFolders():
"""Creates the standard folders for a Blended website"""
# Create the templates folder
create_folder(os.path.join(cwd, "templates"))
# Create the templates/assets folder
create_folder(os.path.join(cwd, "templates", "assets"))
# Create the templates/assets/css folder
create_folder(os.path.join(cwd, "templates", "assets", "css"))
# Create the templates/assets/js folder
create_folder(os.path.join(cwd, "templates", "assets", "js"))
# Create the templates/assets/img folder
create_folder(os.path.join(cwd, "templates", "assets", "img"))
# Create the content folder
create_folder(os.path.join(cwd, "content"))
def parseXML(filepath):
"""Parses an XML file using untangle"""
try:
output = untangle.parse(filepath)
except:
sys.exit("The export file could not be found or read!")
return output
|
BlendedSiteGenerator/Blended | blended/functions.py | checkConfig | python | def checkConfig():
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
print("Making a backup of your config file!")
config_file_dir2 = os.path.join(cwd, "config.py.oldbak")
copyfile(config_file_dir, config_file_dir2) | If the config.py file exists, back it up | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L95-L101 | null | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
"""Creates the specified folder if it dosen't already exist"""
if not os.path.exists(path):
os.makedirs(path)
def replace_folder(path):
"""If the specified folder exists, it is deleted and recreated"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def get_html_filename(filename):
"""Converts the filename to a .html extension"""
if ".html" in filename:
newFilename = filename
elif ".md" in filename:
newFilename = filename.replace(".md", ".html")
elif ".tile" in filename:
newFilename = filename.replace(".tile", ".html")
elif ".jade" in filename:
newFilename = filename.replace(".jade", ".html")
elif ".txt" in filename:
newFilename = filename.replace(".txt", ".html")
elif ".rst" in filename:
newFilename = filename.replace(".rst", ".html")
elif ".docx" in filename:
newFilename = filename.replace(".docx", ".html")
else:
print(filename + " is not a valid file type!")
return newFilename
def get_html_clear_filename(filename):
"""Clears the file extension from the filename and makes it nice looking"""
newFilename = filename.replace(".html", "")
newFilename = newFilename.replace(".md", "")
newFilename = newFilename.replace(".txt", "")
newFilename = newFilename.replace(".tile", "")
newFilename = newFilename.replace(".jade", "")
newFilename = newFilename.replace(".rst", "")
newFilename = newFilename.replace(".docx", "")
newFilename = newFilename.replace("index", "home")
newFilename = newFilename.replace("-", " ")
newFilename = newFilename.replace("_", " ")
newFilename = newFilename.title()
return newFilename
def getunzipped(username, repo, thedir):
"""Downloads and unzips a zip file"""
theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
name = os.path.join(thedir, 'temp.zip')
try:
name = urllib.urlretrieve(theurl, name)
name = os.path.join(thedir, 'temp.zip')
except IOError as e:
print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
return
try:
z = zipfile.ZipFile(name)
except zipfile.error as e:
print("Bad zipfile (from %r): %s" % (theurl, e))
return
z.extractall(thedir)
z.close()
os.remove(name)
copy_tree(os.path.join(thedir, repo + "-master"), thedir)
shutil.rmtree(os.path.join(thedir, repo + "-master"))
def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
"""Generates a config file from the information"""
config_file_dir = os.path.join(cwd, "config.py")
config_file = open(config_file_dir, "w")
config_file.write('blended_version = ' + app_version + '\n')
config_file.write('\n')
config_file.write(
'# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
config_file.write('\n')
config_file.write('website_name = "' + wname + '"\n')
config_file.write('website_description = "' + wdesc + '"\n')
config_file.write(
'website_description_long = "' + wdescl + '"\n')
config_file.write('website_license = "' + wlic + '"\n')
config_file.write('website_language = "' + wlan + '"\n')
config_file.write('website_url = "' + wurl + '"\n')
config_file.write('\n')
config_file.write('author_name = "' + aname + '"\n')
config_file.write('author_bio = "' + abio + '"\n')
config_file.write('\n')
config_file.write('home_page_list = True\n')
config_file.write('\n')
config_file.write('plugins = [] # Place all needed plugins in here\n')
config_file.write(
'custom_variables = {} # Place all custom variables in here\n')
config_file.write('\n')
config_file.write('minify_css = False\n')
config_file.write('minify_js = False\n')
config_file.write('\n')
config_file.write('# The following values are used for FTP uploads')
config_file.write('\n')
config_file.write('ftp_server = "localhost"\n')
config_file.write('ftp_username = "user"\n')
config_file.write('ftp_password = "pass"\n')
config_file.write('ftp_port = 21\n')
config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
config_file.close()
def createBlendedFolders():
"""Creates the standard folders for a Blended website"""
# Create the templates folder
create_folder(os.path.join(cwd, "templates"))
# Create the templates/assets folder
create_folder(os.path.join(cwd, "templates", "assets"))
# Create the templates/assets/css folder
create_folder(os.path.join(cwd, "templates", "assets", "css"))
# Create the templates/assets/js folder
create_folder(os.path.join(cwd, "templates", "assets", "js"))
# Create the templates/assets/img folder
create_folder(os.path.join(cwd, "templates", "assets", "img"))
# Create the content folder
create_folder(os.path.join(cwd, "content"))
def parseXML(filepath):
"""Parses an XML file using untangle"""
try:
output = untangle.parse(filepath)
except:
sys.exit("The export file could not be found or read!")
return output
|
BlendedSiteGenerator/Blended | blended/functions.py | createConfig | python | def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
config_file_dir = os.path.join(cwd, "config.py")
config_file = open(config_file_dir, "w")
config_file.write('blended_version = ' + app_version + '\n')
config_file.write('\n')
config_file.write(
'# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
config_file.write('\n')
config_file.write('website_name = "' + wname + '"\n')
config_file.write('website_description = "' + wdesc + '"\n')
config_file.write(
'website_description_long = "' + wdescl + '"\n')
config_file.write('website_license = "' + wlic + '"\n')
config_file.write('website_language = "' + wlan + '"\n')
config_file.write('website_url = "' + wurl + '"\n')
config_file.write('\n')
config_file.write('author_name = "' + aname + '"\n')
config_file.write('author_bio = "' + abio + '"\n')
config_file.write('\n')
config_file.write('home_page_list = True\n')
config_file.write('\n')
config_file.write('plugins = [] # Place all needed plugins in here\n')
config_file.write(
'custom_variables = {} # Place all custom variables in here\n')
config_file.write('\n')
config_file.write('minify_css = False\n')
config_file.write('minify_js = False\n')
config_file.write('\n')
config_file.write('# The following values are used for FTP uploads')
config_file.write('\n')
config_file.write('ftp_server = "localhost"\n')
config_file.write('ftp_username = "user"\n')
config_file.write('ftp_password = "pass"\n')
config_file.write('ftp_port = 21\n')
config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
config_file.close() | Generates a config file from the information | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L104-L141 | null | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
"""Creates the specified folder if it dosen't already exist"""
if not os.path.exists(path):
os.makedirs(path)
def replace_folder(path):
"""If the specified folder exists, it is deleted and recreated"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def get_html_filename(filename):
"""Converts the filename to a .html extension"""
if ".html" in filename:
newFilename = filename
elif ".md" in filename:
newFilename = filename.replace(".md", ".html")
elif ".tile" in filename:
newFilename = filename.replace(".tile", ".html")
elif ".jade" in filename:
newFilename = filename.replace(".jade", ".html")
elif ".txt" in filename:
newFilename = filename.replace(".txt", ".html")
elif ".rst" in filename:
newFilename = filename.replace(".rst", ".html")
elif ".docx" in filename:
newFilename = filename.replace(".docx", ".html")
else:
print(filename + " is not a valid file type!")
return newFilename
def get_html_clear_filename(filename):
"""Clears the file extension from the filename and makes it nice looking"""
newFilename = filename.replace(".html", "")
newFilename = newFilename.replace(".md", "")
newFilename = newFilename.replace(".txt", "")
newFilename = newFilename.replace(".tile", "")
newFilename = newFilename.replace(".jade", "")
newFilename = newFilename.replace(".rst", "")
newFilename = newFilename.replace(".docx", "")
newFilename = newFilename.replace("index", "home")
newFilename = newFilename.replace("-", " ")
newFilename = newFilename.replace("_", " ")
newFilename = newFilename.title()
return newFilename
def getunzipped(username, repo, thedir):
"""Downloads and unzips a zip file"""
theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
name = os.path.join(thedir, 'temp.zip')
try:
name = urllib.urlretrieve(theurl, name)
name = os.path.join(thedir, 'temp.zip')
except IOError as e:
print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
return
try:
z = zipfile.ZipFile(name)
except zipfile.error as e:
print("Bad zipfile (from %r): %s" % (theurl, e))
return
z.extractall(thedir)
z.close()
os.remove(name)
copy_tree(os.path.join(thedir, repo + "-master"), thedir)
shutil.rmtree(os.path.join(thedir, repo + "-master"))
def checkConfig():
"""If the config.py file exists, back it up"""
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
print("Making a backup of your config file!")
config_file_dir2 = os.path.join(cwd, "config.py.oldbak")
copyfile(config_file_dir, config_file_dir2)
def createBlendedFolders():
"""Creates the standard folders for a Blended website"""
# Create the templates folder
create_folder(os.path.join(cwd, "templates"))
# Create the templates/assets folder
create_folder(os.path.join(cwd, "templates", "assets"))
# Create the templates/assets/css folder
create_folder(os.path.join(cwd, "templates", "assets", "css"))
# Create the templates/assets/js folder
create_folder(os.path.join(cwd, "templates", "assets", "js"))
# Create the templates/assets/img folder
create_folder(os.path.join(cwd, "templates", "assets", "img"))
# Create the content folder
create_folder(os.path.join(cwd, "content"))
def parseXML(filepath):
"""Parses an XML file using untangle"""
try:
output = untangle.parse(filepath)
except:
sys.exit("The export file could not be found or read!")
return output
|
BlendedSiteGenerator/Blended | blended/functions.py | createBlendedFolders | python | def createBlendedFolders():
# Create the templates folder
create_folder(os.path.join(cwd, "templates"))
# Create the templates/assets folder
create_folder(os.path.join(cwd, "templates", "assets"))
# Create the templates/assets/css folder
create_folder(os.path.join(cwd, "templates", "assets", "css"))
# Create the templates/assets/js folder
create_folder(os.path.join(cwd, "templates", "assets", "js"))
# Create the templates/assets/img folder
create_folder(os.path.join(cwd, "templates", "assets", "img"))
# Create the content folder
create_folder(os.path.join(cwd, "content")) | Creates the standard folders for a Blended website | train | https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/functions.py#L144-L162 | [
"def create_folder(path):\n \"\"\"Creates the specified folder if it dosen't already exist\"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n"
] | """This holds all of the functions"""
from __future__ import print_function
import os
import shutil
from shutil import copyfile
import sys
import urllib
import zipfile
from distutils.dir_util import copy_tree
import untangle
# Very important, get the directory that the user wants to run commands in
cwd = os.getcwd()
def create_folder(path):
"""Creates the specified folder if it dosen't already exist"""
if not os.path.exists(path):
os.makedirs(path)
def replace_folder(path):
"""If the specified folder exists, it is deleted and recreated"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
def get_html_filename(filename):
"""Converts the filename to a .html extension"""
if ".html" in filename:
newFilename = filename
elif ".md" in filename:
newFilename = filename.replace(".md", ".html")
elif ".tile" in filename:
newFilename = filename.replace(".tile", ".html")
elif ".jade" in filename:
newFilename = filename.replace(".jade", ".html")
elif ".txt" in filename:
newFilename = filename.replace(".txt", ".html")
elif ".rst" in filename:
newFilename = filename.replace(".rst", ".html")
elif ".docx" in filename:
newFilename = filename.replace(".docx", ".html")
else:
print(filename + " is not a valid file type!")
return newFilename
def get_html_clear_filename(filename):
"""Clears the file extension from the filename and makes it nice looking"""
newFilename = filename.replace(".html", "")
newFilename = newFilename.replace(".md", "")
newFilename = newFilename.replace(".txt", "")
newFilename = newFilename.replace(".tile", "")
newFilename = newFilename.replace(".jade", "")
newFilename = newFilename.replace(".rst", "")
newFilename = newFilename.replace(".docx", "")
newFilename = newFilename.replace("index", "home")
newFilename = newFilename.replace("-", " ")
newFilename = newFilename.replace("_", " ")
newFilename = newFilename.title()
return newFilename
def getunzipped(username, repo, thedir):
"""Downloads and unzips a zip file"""
theurl = "https://github.com/" + username + "/" + repo + "/archive/master.zip"
name = os.path.join(thedir, 'temp.zip')
try:
name = urllib.urlretrieve(theurl, name)
name = os.path.join(thedir, 'temp.zip')
except IOError as e:
print("Can't retrieve %r to %r: %s" % (theurl, thedir, e))
return
try:
z = zipfile.ZipFile(name)
except zipfile.error as e:
print("Bad zipfile (from %r): %s" % (theurl, e))
return
z.extractall(thedir)
z.close()
os.remove(name)
copy_tree(os.path.join(thedir, repo + "-master"), thedir)
shutil.rmtree(os.path.join(thedir, repo + "-master"))
def checkConfig():
"""If the config.py file exists, back it up"""
config_file_dir = os.path.join(cwd, "config.py")
if os.path.exists(config_file_dir):
print("Making a backup of your config file!")
config_file_dir2 = os.path.join(cwd, "config.py.oldbak")
copyfile(config_file_dir, config_file_dir2)
def createConfig(app_version=5.0, wname="", wdesc="", wdescl="", wlic="", wlan="", wurl="", aname="", abio=""):
"""Generates a config file from the information"""
config_file_dir = os.path.join(cwd, "config.py")
config_file = open(config_file_dir, "w")
config_file.write('blended_version = ' + app_version + '\n')
config_file.write('\n')
config_file.write(
'# Configuration is automatically generated by Blended (http://jmroper.com/blended), feel free to edit any values below')
config_file.write('\n')
config_file.write('website_name = "' + wname + '"\n')
config_file.write('website_description = "' + wdesc + '"\n')
config_file.write(
'website_description_long = "' + wdescl + '"\n')
config_file.write('website_license = "' + wlic + '"\n')
config_file.write('website_language = "' + wlan + '"\n')
config_file.write('website_url = "' + wurl + '"\n')
config_file.write('\n')
config_file.write('author_name = "' + aname + '"\n')
config_file.write('author_bio = "' + abio + '"\n')
config_file.write('\n')
config_file.write('home_page_list = True\n')
config_file.write('\n')
config_file.write('plugins = [] # Place all needed plugins in here\n')
config_file.write(
'custom_variables = {} # Place all custom variables in here\n')
config_file.write('\n')
config_file.write('minify_css = False\n')
config_file.write('minify_js = False\n')
config_file.write('\n')
config_file.write('# The following values are used for FTP uploads')
config_file.write('\n')
config_file.write('ftp_server = "localhost"\n')
config_file.write('ftp_username = "user"\n')
config_file.write('ftp_password = "pass"\n')
config_file.write('ftp_port = 21\n')
config_file.write('ftp_upload_path = "public_html/myWebsite"\n')
config_file.close()
def parseXML(filepath):
"""Parses an XML file using untangle"""
try:
output = untangle.parse(filepath)
except:
sys.exit("The export file could not be found or read!")
return output
|
miku/gluish | gluish/common.py | getfirstline | python | def getfirstline(file, default):
with open(file, 'rb') as fh:
content = fh.readlines()
if len(content) == 1:
return content[0].decode('utf-8').strip('\n')
return default | Returns the first line of a file. | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/common.py#L51-L60 | null | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# by The Finc Authors, http://finc.info
# by Martin Czygan, <martin.czygan@uni-leipzig.de>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#
import datetime
import logging
import os
import luigi
import requests
from gluish.parameter import ClosestDateParameter
from gluish.utils import shellout, which
__all__ = ['Executable', 'Available', 'GitCloneRepository', 'GitUpdateRepository', 'FillSolrIndex']
logger = logging.getLogger('gluish')
def service_is_up(service):
"""
Return `False` if HTTP services returns status code != 200.
"""
try:
return requests.get(service).status_code == 200
except:
return False
class chdir(object):
"""
Change directory temporarily.
"""
def __init__(self, path):
self.wd = os.getcwd()
self.dir = path
def __enter__(self):
os.chdir(self.dir)
def __exit__(self, *args):
os.chdir(self.wd)
class Executable(luigi.Task):
"""
Checks, whether an external executable is available. This task will consider
itself complete, only if the executable `name` is found in $PATH.
"""
name = luigi.Parameter()
message = luigi.Parameter(default="")
def run(self):
""" Only run if, task is not complete. """
raise RuntimeError('external program required: %s (%s)' % (self.name, self.message))
def complete(self):
return which(self.name) is not None
class Available(luigi.Task):
"""
Checks, whether an HTTP service is available or not. This task will consider
itself complete, only if the HTTP service return a 200.
"""
service = luigi.Parameter()
message = luigi.Parameter(default="")
def run(self):
""" Only run if, task is not complete. """
raise RuntimeError('HTTP service %s is not available (%s)' % (self.service, self.message))
def complete(self):
return service_is_up(self.service)
class GitCloneRepository(luigi.Task):
"""
Checks, whether a certain directory already exists (that should contain a Git repository) - otherwise it will be cloned.
"""
gitrepository = luigi.Parameter()
repositorydirectory = luigi.Parameter()
basedirectory = luigi.Parameter()
def requires(self):
return Executable(name='git')
def run(self):
self.output().makedirs()
with chdir(str(self.basedirectory)):
shellout("""git clone {gitrepository}""", gitrepository=self.gitrepository)
def output(self):
return luigi.LocalTarget(path=os.path.join(self.basedirectory, str(self.repositorydirectory)))
class GitUpdateRepository(luigi.Task):
"""
Updates an existing Git repository
"""
gitrepository = luigi.Parameter()
repositorydirectory = luigi.Parameter()
basedirectory = luigi.Parameter()
branch = luigi.Parameter(default="master", significant=False)
def requires(self):
return [
GitCloneRepository(gitrepository=self.gitrepository,
repositorydirectory=self.repositorydirectory,
basedirectory=self.basedirectory),
Executable(name='git')
]
def run(self):
with chdir(str(self.output().path)):
shellout("""git checkout {branch}""", branch=self.branch)
shellout("""git pull origin {branch}""", branch=self.branch)
def complete(self):
if not self.output().exists():
return False
with chdir(str(self.output().path)):
output = shellout("""git fetch origin {branch} > {output} 2>&1""", branch=self.branch)
result = True
with open(output, 'rb') as fh:
content = fh.readlines()
if len(content) >= 3:
result = False
revparseoutput = shellout("""git rev-parse {branch} > {output} 2>&1""", branch=self.branch)
originrevparseoutput = shellout("""git rev-parse origin/{branch} > {output} 2>&1""", branch=self.branch)
revparse = getfirstline(revparseoutput, "0")
originrevparse = getfirstline(originrevparseoutput, "1")
if revparse != originrevparse:
result = False
return result
def output(self):
return luigi.LocalTarget(path=os.path.join(str(self.basedirectory), str(self.repositorydirectory)))
class FillSolrIndex(luigi.Task):
# TODO: define proper complete criteria (?)
# e.g. check, whether the amount of records that should be loaded into the index is index (if not, then index load is not successfully)
"""
Loads processed data of a data package into a given Solr index (with help of solrbulk)
"""
date = ClosestDateParameter(default=datetime.date.today())
solruri = luigi.Parameter()
solrcore = luigi.Parameter()
purge = luigi.BoolParameter(default=False, significant=False)
purgequery = luigi.Parameter(default="", significant=False)
size = luigi.IntParameter(default=1000, significant=False)
worker = luigi.IntParameter(default=2, significant=False)
commitlimit = luigi.IntParameter(default=1000, significant=False)
input = luigi.Parameter()
taskdir = luigi.Parameter()
outputfilename = luigi.Parameter()
salt = luigi.Parameter()
def determineprefix(self, purge=None, purgequery=None):
solrbulk = 'solrbulk'
if purge and purgequery is not None:
return solrbulk + ' -purge -purge-query "' + purgequery + '"'
if purge:
return solrbulk + ' -purge'
return solrbulk
def requires(self):
return [
Available(service=self.solruri,
message="provide a running Solr, please"),
Executable(name='solrbulk',
message='solrbulk command is missing on your system, you can, e.g., install it as a deb package on your Debian-based linux system (see https://github.com/miku/solrbulk#installation)'),
]
def run(self):
prefix = self.determineprefix(self.purge, self.purgequery)
server = str(self.solruri) + str(self.solrcore)
chunksize = self.size
cores = self.worker
inputpath = self.input
commit = self.commitlimit
output = shellout(
"""{prefix} -verbose -server {server} -size {size} -w {worker} -commit {commit} < {input}""",
prefix=prefix,
server=server,
size=chunksize,
worker=cores,
commit=commit,
input=inputpath)
luigi.LocalTarget(output).move(self.output().path)
def output(self):
return luigi.LocalTarget(path=os.path.join(self.taskdir, str(self.outputfilename)))
|
miku/gluish | gluish/format.py | write_tsv | python | def write_tsv(output_stream, *tup, **kwargs):
encoding = kwargs.get('encoding') or 'utf-8'
value = '\t'.join([s for s in tup]) + '\n'
output_stream.write(value.encode(encoding)) | Write argument list in `tup` out as a tab-separeated row to the stream. | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/format.py#L56-L62 | null | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# by The Finc Authors, http://finc.info
# by Martin Czygan, <martin.czygan@uni-leipzig.de>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#
"""
Format add-ons
==============
Format related functions and classes. Highlights: A TSV class, that helps
to work with tabular data.
Example:
def run(self):
with self.output().open('w') as output:
output.write_tsv('Ubik', '1969', '67871286')
def output(self):
return luigi.LocalTarget(path=self.path(), format=TSV)
"""
from __future__ import unicode_literals
import collections
import functools
from builtins import str
import luigi
from gluish.utils import random_string, which
__all__ = ['TSV']
def iter_tsv(input_stream, cols=None, encoding='utf-8'):
"""
If a tuple is given in cols, use the elements as names to construct
a namedtuple.
Columns can be marked as ignored by using ``X`` or ``0`` as column name.
Example (ignore the first four columns of a five column TSV):
::
def run(self):
with self.input().open() as handle:
for row in handle.iter_tsv(cols=('X', 'X', 'X', 'X', 'iln')):
print(row.iln)
"""
if cols:
cols = [c if not c in ('x', 'X', 0, None) else random_string(length=5)
for c in cols]
Record = collections.namedtuple('Record', cols)
for line in input_stream:
yield Record._make(line.decode(encoding).rstrip('\n').split('\t'))
else:
for line in input_stream:
yield tuple(line.decode(encoding).rstrip('\n').split('\t'))
class TSVFormat(luigi.format.Format):
"""
A basic CSV/TSV format.
Discussion: https://groups.google.com/forum/#!topic/luigi-user/F813st16xqw
"""
def hdfs_reader(self, input_pipe):
raise NotImplementedError()
def hdfs_writer(self, output_pipe):
raise NotImplementedError()
def pipe_reader(self, input_pipe):
input_pipe.iter_tsv = functools.partial(iter_tsv, input_pipe)
return input_pipe
def pipe_writer(self, output_pipe):
output_pipe.write_tsv = functools.partial(write_tsv, output_pipe)
return output_pipe
class GzipFormat(luigi.format.Format):
"""
A gzip format, that upgrades itself to pigz, if it's installed.
"""
input = 'bytes'
output = 'bytes'
def __init__(self, compression_level=None):
self.compression_level = compression_level
self.gzip = ["gzip"]
self.gunzip = ["gunzip"]
if which('pigz'):
self.gzip = ["pigz"]
self.gunzip = ["unpigz"]
def pipe_reader(self, input_pipe):
return luigi.format.InputPipeProcessWrapper(self.gunzip, input_pipe)
def pipe_writer(self, output_pipe):
args = self.gzip
if self.compression_level is not None:
args.append('-' + str(int(self.compression_level)))
return luigi.format.OutputPipeProcessWrapper(args, output_pipe)
TSV = TSVFormat()
Gzip = GzipFormat()
|
miku/gluish | gluish/format.py | iter_tsv | python | def iter_tsv(input_stream, cols=None, encoding='utf-8'):
if cols:
cols = [c if not c in ('x', 'X', 0, None) else random_string(length=5)
for c in cols]
Record = collections.namedtuple('Record', cols)
for line in input_stream:
yield Record._make(line.decode(encoding).rstrip('\n').split('\t'))
else:
for line in input_stream:
yield tuple(line.decode(encoding).rstrip('\n').split('\t')) | If a tuple is given in cols, use the elements as names to construct
a namedtuple.
Columns can be marked as ignored by using ``X`` or ``0`` as column name.
Example (ignore the first four columns of a five column TSV):
::
def run(self):
with self.input().open() as handle:
for row in handle.iter_tsv(cols=('X', 'X', 'X', 'X', 'iln')):
print(row.iln) | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/format.py#L65-L89 | null | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# by The Finc Authors, http://finc.info
# by Martin Czygan, <martin.czygan@uni-leipzig.de>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#
"""
Format add-ons
==============
Format related functions and classes. Highlights: A TSV class, that helps
to work with tabular data.
Example:
def run(self):
with self.output().open('w') as output:
output.write_tsv('Ubik', '1969', '67871286')
def output(self):
return luigi.LocalTarget(path=self.path(), format=TSV)
"""
from __future__ import unicode_literals
import collections
import functools
from builtins import str
import luigi
from gluish.utils import random_string, which
__all__ = ['TSV']
def write_tsv(output_stream, *tup, **kwargs):
"""
Write argument list in `tup` out as a tab-separeated row to the stream.
"""
encoding = kwargs.get('encoding') or 'utf-8'
value = '\t'.join([s for s in tup]) + '\n'
output_stream.write(value.encode(encoding))
class TSVFormat(luigi.format.Format):
"""
A basic CSV/TSV format.
Discussion: https://groups.google.com/forum/#!topic/luigi-user/F813st16xqw
"""
def hdfs_reader(self, input_pipe):
raise NotImplementedError()
def hdfs_writer(self, output_pipe):
raise NotImplementedError()
def pipe_reader(self, input_pipe):
input_pipe.iter_tsv = functools.partial(iter_tsv, input_pipe)
return input_pipe
def pipe_writer(self, output_pipe):
output_pipe.write_tsv = functools.partial(write_tsv, output_pipe)
return output_pipe
class GzipFormat(luigi.format.Format):
"""
A gzip format, that upgrades itself to pigz, if it's installed.
"""
input = 'bytes'
output = 'bytes'
def __init__(self, compression_level=None):
self.compression_level = compression_level
self.gzip = ["gzip"]
self.gunzip = ["gunzip"]
if which('pigz'):
self.gzip = ["pigz"]
self.gunzip = ["unpigz"]
def pipe_reader(self, input_pipe):
return luigi.format.InputPipeProcessWrapper(self.gunzip, input_pipe)
def pipe_writer(self, output_pipe):
args = self.gzip
if self.compression_level is not None:
args.append('-' + str(int(self.compression_level)))
return luigi.format.OutputPipeProcessWrapper(args, output_pipe)
TSV = TSVFormat()
Gzip = GzipFormat()
|
miku/gluish | gluish/intervals.py | every_minute | python | def every_minute(dt=datetime.datetime.utcnow(), fmt=None):
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date | Just pass on the given date. | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/intervals.py#L43-L50 | null | # coding: utf-8
#
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# by The Finc Authors, http://finc.info
# by Martin Czygan, <martin.czygan@uni-leipzig.de>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#
"""
Various intervals.
"""
import datetime
__all__ = [
'biweekly',
'daily',
'every_minute',
'hourly',
'monthly',
'quarterly',
'semiyearly',
'weekly',
'yearly',
]
def hourly(dt=datetime.datetime.utcnow(), fmt=None):
"""
Get a new datetime object every hour.
"""
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, 1, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date
def daily(date=datetime.date.today()):
"""
Just pass on the given date.
"""
return date
def weekly(date=datetime.date.today()):
"""
Weeks start are fixes at Monday for now.
"""
return date - datetime.timedelta(days=date.weekday())
def biweekly(date=datetime.date.today()):
"""
Every two weeks.
"""
return datetime.date(date.year, date.month, 1 if date.day < 15 else 15)
def monthly(date=datetime.date.today()):
"""
Take a date object and return the first day of the month.
"""
return datetime.date(date.year, date.month, 1)
def quarterly(date=datetime.date.today()):
"""
Fixed at: 1/1, 4/1, 7/1, 10/1.
"""
return datetime.date(date.year, ((date.month - 1)//3) * 3 + 1, 1)
def semiyearly(date=datetime.date.today()):
"""
Twice a year.
"""
return datetime.date(date.year, 1 if date.month < 7 else 7, 1)
def yearly(date=datetime.date.today()):
"""
Once a year.
"""
return datetime.date(date.year, 1, 1)
|
miku/gluish | gluish/intervals.py | hourly | python | def hourly(dt=datetime.datetime.utcnow(), fmt=None):
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, 1, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date | Get a new datetime object every hour. | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/intervals.py#L52-L59 | null | # coding: utf-8
#
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# by The Finc Authors, http://finc.info
# by Martin Czygan, <martin.czygan@uni-leipzig.de>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#
"""
Various intervals.
"""
import datetime
__all__ = [
'biweekly',
'daily',
'every_minute',
'hourly',
'monthly',
'quarterly',
'semiyearly',
'weekly',
'yearly',
]
def every_minute(dt=datetime.datetime.utcnow(), fmt=None):
"""
Just pass on the given date.
"""
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date
def daily(date=datetime.date.today()):
"""
Just pass on the given date.
"""
return date
def weekly(date=datetime.date.today()):
"""
Weeks start are fixes at Monday for now.
"""
return date - datetime.timedelta(days=date.weekday())
def biweekly(date=datetime.date.today()):
"""
Every two weeks.
"""
return datetime.date(date.year, date.month, 1 if date.day < 15 else 15)
def monthly(date=datetime.date.today()):
"""
Take a date object and return the first day of the month.
"""
return datetime.date(date.year, date.month, 1)
def quarterly(date=datetime.date.today()):
"""
Fixed at: 1/1, 4/1, 7/1, 10/1.
"""
return datetime.date(date.year, ((date.month - 1)//3) * 3 + 1, 1)
def semiyearly(date=datetime.date.today()):
"""
Twice a year.
"""
return datetime.date(date.year, 1 if date.month < 7 else 7, 1)
def yearly(date=datetime.date.today()):
"""
Once a year.
"""
return datetime.date(date.year, 1, 1)
|
miku/gluish | gluish/intervals.py | weekly | python | def weekly(date=datetime.date.today()):
return date - datetime.timedelta(days=date.weekday()) | Weeks start are fixes at Monday for now. | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/intervals.py#L67-L71 | null | # coding: utf-8
#
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# by The Finc Authors, http://finc.info
# by Martin Czygan, <martin.czygan@uni-leipzig.de>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#
"""
Various intervals.
"""
import datetime
__all__ = [
'biweekly',
'daily',
'every_minute',
'hourly',
'monthly',
'quarterly',
'semiyearly',
'weekly',
'yearly',
]
def every_minute(dt=datetime.datetime.utcnow(), fmt=None):
"""
Just pass on the given date.
"""
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date
def hourly(dt=datetime.datetime.utcnow(), fmt=None):
"""
Get a new datetime object every hour.
"""
date = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, 1, 1, 0, dt.tzinfo)
if fmt is not None:
return date.strftime(fmt)
return date
def daily(date=datetime.date.today()):
"""
Just pass on the given date.
"""
return date
def biweekly(date=datetime.date.today()):
"""
Every two weeks.
"""
return datetime.date(date.year, date.month, 1 if date.day < 15 else 15)
def monthly(date=datetime.date.today()):
"""
Take a date object and return the first day of the month.
"""
return datetime.date(date.year, date.month, 1)
def quarterly(date=datetime.date.today()):
"""
Fixed at: 1/1, 4/1, 7/1, 10/1.
"""
return datetime.date(date.year, ((date.month - 1)//3) * 3 + 1, 1)
def semiyearly(date=datetime.date.today()):
"""
Twice a year.
"""
return datetime.date(date.year, 1 if date.month < 7 else 7, 1)
def yearly(date=datetime.date.today()):
"""
Once a year.
"""
return datetime.date(date.year, 1, 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.