repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
xtream1101/cutil
|
cutil/database.py
|
Database.update
|
python
|
def update(self, table, data_list, matched_field=None, return_cols='id'):
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
if matched_field is None:
# Assume the id field
logger.info("Matched field not defined, assuming the `id` field")
matched_field = 'id'
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
if len(data_list) == 0:
# No need to continue
return []
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
try:
with self.getcursor() as cur:
query_list = []
# TODO: change to return data from the database, not just what you passed in
return_list = []
for row in data_list:
if row.get(matched_field) is None:
logger.debug("Cannot update row. Missing field {field} in data {data}"
.format(field=matched_field, data=row))
logger.error("Cannot update row. Missing field {field} in data".format(field=matched_field))
continue
# Pull matched_value from data to be updated and remove that key
matched_value = row.get(matched_field)
del row[matched_field]
query = "UPDATE {table} SET {data} WHERE {matched_field}=%s {return_cols}"\
.format(table=table,
data=','.join("%s=%%s" % u for u in row.keys()),
matched_field=matched_field,
return_cols=return_cols
)
values = list(row.values())
values.append(matched_value)
values = _check_values(values)
query = cur.mogrify(query, values)
query_list.append(query)
return_list.append(matched_value)
finial_query = b';'.join(query_list)
cur.execute(finial_query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error updating data")
logger.debug("Error updating data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and 4 cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/database.py#L213-L289
|
[
"def _check_values(in_values):\n \"\"\" Check if values need to be converted before they get mogrify'd\n \"\"\"\n out_values = []\n for value in in_values:\n # if isinstance(value, (dict, list)):\n # out_values.append(json.dumps(value))\n # else:\n out_values.append(value)\n\n return tuple(out_values)\n"
] |
class Database:
def __init__(self, db_config, table_raw=None, max_connections=10):
from psycopg2.pool import ThreadedConnectionPool
self.table_raw = table_raw
try:
# Set default port is port is not passed
if 'db_port' not in db_config:
db_config['db_port'] = 5432
self.pool = ThreadedConnectionPool(minconn=1,
maxconn=max_connections,
dsn="dbname={db_name} user={db_user} host={db_host} password={db_pass} port={db_port}"
.format(**db_config))
except Exception:
logger.exception("Error in db connection")
sys.exit(1)
logger.debug("Connected to database: {host}".format(host=db_config['db_host']))
@contextmanager
def getcursor(self, **kwargs):
conn = self.pool.getconn()
try:
yield conn.cursor(**kwargs)
conn.commit()
except Exception as e:
conn.rollback()
raise e.with_traceback(sys.exc_info()[2])
finally:
self.pool.putconn(conn)
def close(self):
self.pool.closeall()
def insert(self, table, data_list, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
try:
with self.getcursor() as cur:
query = "INSERT INTO {table} ({fields}) VALUES {values} {return_cols}"\
.format(table=table,
fields='"{0}"'.format('", "'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
return_cols=return_cols,
)
values = []
for row in [tuple(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error inserting data")
logger.debug("Error inserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
def upsert(self, table, data_list, on_conflict_fields, on_conflict_action='update',
update_fields=None, return_cols='id'):
"""
Create a bulk upsert statement which is much faster (~6x in tests with 10k & 100k rows and n cols)
for upserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# TODO: raise some error here rather then returning None
return None
# Make sure on_conflict_fields is a list
if not isinstance(on_conflict_fields, list):
on_conflict_fields = [on_conflict_fields]
# Make sure on_conflict_fields has data
if len(on_conflict_fields) == 0 or on_conflict_fields[0] is None:
# No need to continue
logger.critical("Must pass in `on_conflict_fields` argument")
# TODO: raise some error here rather then returning None
return None
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Make sure update_fields is a list/valid
if on_conflict_action == 'update':
if not isinstance(update_fields, list):
update_fields = [update_fields]
# If noting is passed in, set `update_fields` to all (data_list-on_conflict_fields)
if len(update_fields) == 0 or update_fields[0] is None:
update_fields = list(set(data_list[0].keys()) - set(on_conflict_fields))
# If update_fields is empty here that could only mean that all fields are set as conflict_fields
if len(update_fields) == 0:
logger.critical("Not all the fields can be `on_conflict_fields` when doing an update")
# TODO: raise some error here rather then returning None
return None
# If everything is good to go with the update fields
fields_update_tmp = []
for key in data_list[0].keys():
fields_update_tmp.append('"{0}"="excluded"."{0}"'.format(key))
conflict_action_sql = 'UPDATE SET {update_fields}'\
.format(update_fields=', '.join(fields_update_tmp))
else:
# Do nothing on conflict
conflict_action_sql = 'NOTHING'
try:
with self.getcursor() as cur:
query = """INSERT INTO {table} ({insert_fields})
VALUES {values}
ON CONFLICT ({on_conflict_fields}) DO
{conflict_action_sql}
{return_cols}
""".format(table=table,
insert_fields='"{0}"'.format('","'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
on_conflict_fields=','.join(on_conflict_fields),
conflict_action_sql=conflict_action_sql,
return_cols=return_cols,
)
# Get all the values for each row and create a lists of lists
values = []
for row in [list(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error upserting data")
logger.debug("Error upserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
xtream1101/cutil
|
cutil/config.py
|
Config.load_configs
|
python
|
def load_configs(self, conf_file):
with open(conf_file) as stream:
lines = itertools.chain(("[global]",), stream)
self._config.read_file(lines)
return self._config['global']
|
Assumes that the config file does not have any sections, so throw it all in global
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/config.py#L12-L19
| null |
class Config:
def __init__(self, conf_file=None):
self._config = configparser.ConfigParser()
self._config.optionxform = str # Keep case of keys
self.config_values = self.remove_quotes(self.load_configs(conf_file))
def remove_quotes(self, configs):
"""
Because some values are wraped in single quotes
"""
for key in configs:
value = configs[key]
if value[0] == "'" and value[-1] == "'":
configs[key] = value[1:-1]
return configs
|
xtream1101/cutil
|
cutil/config.py
|
Config.remove_quotes
|
python
|
def remove_quotes(self, configs):
for key in configs:
value = configs[key]
if value[0] == "'" and value[-1] == "'":
configs[key] = value[1:-1]
return configs
|
Because some values are wraped in single quotes
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/config.py#L21-L29
| null |
class Config:
def __init__(self, conf_file=None):
self._config = configparser.ConfigParser()
self._config.optionxform = str # Keep case of keys
self.config_values = self.remove_quotes(self.load_configs(conf_file))
def load_configs(self, conf_file):
"""
Assumes that the config file does not have any sections, so throw it all in global
"""
with open(conf_file) as stream:
lines = itertools.chain(("[global]",), stream)
self._config.read_file(lines)
return self._config['global']
|
xtream1101/cutil
|
cutil/__init__.py
|
multikey_sort
|
python
|
def multikey_sort(items, columns):
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
|
Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L108-L125
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
sanitize
|
python
|
def sanitize(string):
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
|
Catch and replace invalid path chars
[replace, with]
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L159-L172
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
chunks_of
|
python
|
def chunks_of(max_chunk_size, list_to_chunk):
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
|
Yields the list with a max size of max_chunk_size
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L216-L221
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
split_into
|
python
|
def split_into(max_num_chunks, list_to_chunk):
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
|
Yields the list with a max total size of max_num_chunks
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L224-L229
|
[
"def chunks_of(max_chunk_size, list_to_chunk):\n \"\"\"\n Yields the list with a max size of max_chunk_size\n \"\"\"\n for i in range(0, len(list_to_chunk), max_chunk_size):\n yield list_to_chunk[i:i + max_chunk_size]\n"
] |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
norm_path
|
python
|
def norm_path(path):
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
|
:return: Proper path for os with vars expanded out
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L240-L248
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
create_hashed_path
|
python
|
def create_hashed_path(base_path, name, depth=2):
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
|
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L251-L272
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
    """
    :return: current time as an integer epoch (whole seconds)
    """
    # Truncate the float epoch down to whole seconds
    now = time.time()
    return int(now)
def get_datetime():
    """
    :return: datetime object for the current local time

    NOTE(review): this is a naive datetime (no tzinfo) — do not mix it with
    timezone-aware values such as the output of datetime_to_utc()
    """
    return datetime.datetime.now()
def datetime_to_str(timestamp):
    # Render as ISO 8601 with a literal "+0000" offset appended.
    # NOTE(review): the suffix is added regardless of the datetime's actual
    # timezone — callers appear expected to pass UTC (see datetime_to_utc);
    # confirm before relying on the offset being truthful
    return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
    # Convert to the UTC timezone. A naive input is interpreted as local time
    # (Python 3.6+ astimezone behavior); an aware input is converted correctly.
    return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=("%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z")):
    """Parse a timestamp string into a datetime, trying each format in order.

    :param timestamp: string to parse, or None
    :param formats: iterable of `datetime.strptime` format strings to try
    :return: datetime from the first matching format, or None when `timestamp`
             is None or no format matches
    """
    # A tuple default avoids the shared-mutable-default-argument pitfall
    if timestamp is None:
        return None
    for time_format in formats:
        try:
            return datetime.datetime.strptime(timestamp, time_format)
        except ValueError:
            # This format did not match; try the next one
            continue
    return None
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
    """Run `callback` over every item of `data` using a pool of worker threads.

    :param num_threads: number of daemon worker threads to start
    :param data: iterable of items; each is passed as the first argument to `callback`
    :param callback: invoked as `callback(item, *args, **kwargs)` for every item
    :return: list of callback return values in completion order (NOT input
             order); items whose callback raised are logged and produce no entry
    """
    q = queue.Queue()
    item_list = []
    def _thread_run():
        # Workers loop forever; as daemon threads they die with the process,
        # not when this function returns
        while True:
            item = q.get()
            try:
                # NOTE(review): the unsynchronized append relies on CPython's
                # GIL making list.append atomic — confirm if porting
                item_list.append(callback(item, *args, **kwargs))
            except Exception:
                logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
            # Always mark the task done so q.join() below can complete
            q.task_done()
    for i in range(num_threads):
        t = threading.Thread(target=_thread_run)
        t.daemon = True
        t.start()
    # Fill the Queue with the data to process
    for item in data:
        q.put(item)
    # Start processing the data
    q.join()
    return item_list
####
# Other functions
####
def multikey_sort(items, columns):
    """Sort a list of dicts by several keys; prefix a column with '-' for descending.

    Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
    """
    key_fns = []
    for col in columns:
        if col.startswith('-'):
            key_fns.append((itemgetter(col[1:].strip()), -1))
        else:
            key_fns.append((itemgetter(col.strip()), 1))

    def compare_items(left, right):
        # Walk the key functions until one of them differentiates the items
        for get_value, direction in key_fns:
            lhs, rhs = get_value(left), get_value(right)
            outcome = ((lhs > rhs) - (lhs < rhs)) * direction
            if outcome:
                return outcome
        return 0

    return sorted(items, key=cmp_to_key(compare_items))
def get_internal_ip():
    """Return this machine's IP address as resolved from its own hostname.

    NOTE(review): this performs a hosts/DNS lookup of the local hostname and
    may return 127.0.0.1 depending on /etc/hosts configuration — verify on
    the deployment hosts
    """
    return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
    """Generate a short hashid key at least `size` characters long.

    :param value: int to encode; a random 0-999999 value when None
    :param salt: hashids salt; a random 0-999999 value when None
    :param size: minimum key length; silently reset to 8 when not an int
    :return: hashid string encoding `value` (requires the `hashids` package)
    """
    if value is None:
        value = random.randint(0, 999999)
    if salt is None:
        salt = random.randint(0, 999999)
    if not isinstance(size, int):
        # Enforce that size must be an int
        size = 8
    hashids = Hashids(salt=str(salt), min_length=size)
    return hashids.encode(value)
def create_uid():
    """Return a new 32-character hex unique id (uuid4 based)."""
    # Named uid and not uuid because this function does not have to use uuid's
    new_uid = uuid.uuid4().hex
    logger.debug("Created new uid: {uid}".format(uid=new_uid))
    return new_uid
def make_url_safe(string):
    """Percent-encode characters that are unsafe in URLs (spaces become '+')."""
    encoded = urllib.parse.quote_plus(string)
    return encoded
def sanitize(string):
    """Replace characters that are unsafe in filesystem paths.

    Each pair below is (character to find, replacement text).
    """
    replacements = (
        ('\\', '-'), (':', '-'), ('/', '-'),
        ('?', ''), ('<', ''), ('>', ''),
        ('`', '`'), ('|', '-'), ('*', '`'),
        ('"', '\''), ('.', ''), ('&', 'and'),
    )
    for target, replacement in replacements:
        string = string.replace(target, replacement)
    return string
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`.

    Taken from: http://stackoverflow.com/a/2556252
    """
    # rsplit cuts from the right, so rejoining swaps only the trailing matches
    return new.join(s.rsplit(old, occurrence))
def flatten(dict_obj, prev_key='', sep='_'):
    """Flatten a nested dict into a single level, joining keys with `sep`.

    :param dict_obj: dict to flatten; dict values are recursed into
    :param prev_key: key prefix used during recursion; leave default when calling
    :param sep: string placed between joined key parts
    :return: new flat dict
    """
    items = {}
    for key, value in dict_obj.items():
        new_key = prev_key + sep + key if prev_key != '' else key
        if isinstance(value, dict):
            # Bug fix: pass `sep` through so a custom separator is honored at
            # every nesting level, not just the first
            items.update(flatten(value, new_key, sep))
        else:
            items[new_key] = value
    return items
def update_dict(d, u):
    """Recursively merge mapping `u` into dict `d` (in place) and return `d`.

    Nested mappings are merged key-by-key; all other values are overwritten.

    Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    # Bug fix: `collections.Mapping` was removed in Python 3.10 — the ABCs
    # live in collections.abc
    from collections.abc import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update_dict(d.get(k, {}), v)
        else:
            d[k] = u[k]
    return d
def get_script_name(ext=False):
    """Return the filename of the running script, optionally keeping its extension."""
    script = os.path.basename(sys.argv[0])
    if ext is False:
        # Keep everything before the FIRST dot (matches original behavior)
        script = script.split('.')[0]
    return script
def chunks_of(max_chunk_size, list_to_chunk):
    """
    Yield successive slices of `list_to_chunk`, each at most `max_chunk_size` long
    """
    start = 0
    total = len(list_to_chunk)
    while start < total:
        yield list_to_chunk[start:start + max_chunk_size]
        start += max_chunk_size
def split_into(max_num_chunks, list_to_chunk):
    """
    Yield `list_to_chunk` split into at most `max_num_chunks` near-equal chunks

    :param max_num_chunks: maximum number of chunks to produce (must be > 0)
    :param list_to_chunk: list to split
    :return: generator of list slices
    """
    # Guard against a zero chunk size (empty input), which previously raised
    # `ValueError: range() arg 3 must not be zero`
    max_chunk_size = max(1, math.ceil(len(list_to_chunk) / max_num_chunks))
    for i in range(0, len(list_to_chunk), max_chunk_size):
        yield list_to_chunk[i:i + max_chunk_size]
####
# File/filesystem related function
####
def get_file_ext(file):
    """Return the extension of `file`, including the leading dot ('' if none)."""
    return os.path.splitext(file)[1]
def norm_path(path):
    """
    :return: Proper path for os with vars expanded out
    """
    # Expand ~user first, then $VARS, then collapse separators/relative parts
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return os.path.normpath(expanded)
def create_path(path, is_dir=False):
    """
    Check if path exists, if not create it
    :param path: path or file to create directory for
    :param is_dir: pass True if we are passing in a directory, default = False
    :return: os safe path from `path`
    """
    path = norm_path(path)
    # For a file path only the containing directory needs to exist
    dir_to_check = path if is_dir else os.path.dirname(path)
    if not os.path.exists(dir_to_check):
        try:
            os.makedirs(dir_to_check)
        except OSError:
            # Another process/thread may have created it between check and now
            pass
    return path
def dump_json(file_, data, **kwargs):
    """Serialize `data` to `file_` as pretty-printed JSON.

    Creates missing parent directories and appends a `.json` extension when
    the filename lacks one. Extra kwargs are forwarded to `json.dump`,
    overriding the sort_keys/indent defaults below.
    """
    json_args = {'sort_keys': True,
                 'indent': 4}
    json_args.update(**kwargs)
    # Make sure the parent directory exists before opening the file
    create_path(file_)
    if not file_.endswith('.json'):
        file_ += '.json'
    with open(file_, 'w') as outfile:
        json.dump(data, outfile, **json_args)
# Compile once at import time; the pattern is reused on every parse_price() call
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
    """Extract a low (and optional high) numeric price from a string.

    :param price: free-form text such as "$1,234.56" or "10 - 20"
    :return: dict {'low': float or None, 'high': float or None}
    """
    found_price = {'low': None,
                   'high': None
                   }
    price_raw = re.search(price_pattern, price)
    if price_raw:
        matched = price_raw.groupdict()
        found_price['low'] = matched.get('low')
        found_price['high'] = matched.get('high')
    for key, value in found_price.items():
        if value is not None:
            value = value.strip()
            # Strip every separator first; a cents part (if any) is re-added below
            new_value = value.replace(',', '').replace('.', '').replace(' ', '')
            try:
                # Check if price has cents: third char from the end being ',' or '.'
                # means the last two digits are cents
                if value[-3] in [',', '.']:
                    # Add . for cents back
                    new_value = new_value[:-2] + '.' + new_value[-2:]
            except IndexError:
                # Price is 99 or less with no cents
                pass
            if new_value != '':
                found_price[key] = float(new_value)
            else:
                found_price[key] = None
    return found_price
def get_image_dimension(url):
    """Download an image over HTTP and return its size in pixels.

    :param url: image url; scheme-relative ("//host/...") urls default to http
    :return: dict {'width': int or None, 'height': int or None}; both values
             stay None when the download or decode fails
    """
    import requests
    from PIL import Image  # pip install pillow
    from io import BytesIO
    size = {'width': None,
            'height': None,
            }
    try:
        # Scheme-relative urls are fetched over plain http
        if url.startswith('//'):
            url = "http:" + url
        data = requests.get(url, timeout=15).content
        im = Image.open(BytesIO(data))
        size['width'], size['height'] = im.size
    except Exception:
        # Best effort: log and return the None/None sentinel instead of raising
        logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
        logger.warning("Error getting image size {url}".format(url=url))
    return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
    """Crop a region out of `image_file` and save it to `output_file`.

    :param image_file: source image path (or anything PIL can open)
    :param output_file: destination path; required
    :param height: crop height in pixels; required, must be >= 1
    :param width: crop width in pixels; required, must be >= 1
    :param x: left edge of the crop box
    :param y: top edge of the crop box
    :return: `output_file` on success, None when any required arg is missing
    :raises ValueError: if width or height is 0 or negative
    """
    from PIL import Image  # pip install pillow
    if output_file is None or height is None or width is None or x is None or y is None:
        logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
        return None
    if width <= 0 or height <= 0:
        logger.warning("Width and height must be 1 or greater")
        raise ValueError
    image = Image.open(image_file)
    # PIL's crop box is (left, upper, right, lower)
    image.crop((x, y, width + x, height + y)).save(output_file)
    return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
    """Decorator factory that throttles calls to the wrapped function.

    At most `num_calls` invocations are allowed per `every` seconds; callers
    that arrive too early sleep until the time window has elapsed. Thread safe.

    Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936

    Keyword Arguments:
        num_calls (float): Maximum method invocations within a period. Must be greater than 0.
        every (float): A dampening factor (in seconds). Can be any number greater than 0.

    Return:
        function: Decorated function that will forward method invocations if the time window has elapsed.
    """
    min_interval = abs(every) / float(num_calls)

    def decorator(func):
        # A one-element list lets the nested wrapper mutate the timestamp
        # without a `nonlocal` declaration
        previous_call = [0.0]
        guard = threading.RLock()

        def wrapper(*args, **kwargs):
            with guard:
                wait_needed = min_interval - (time.time() - previous_call[0])
                if wait_needed > 0:
                    time.sleep(wait_needed)
                previous_call[0] = time.time()
                return func(*args, **kwargs)

        return wrapper

    return decorator
def rate_limited_old(max_per_second):
    """Decorator limiting the wrapped function to `max_per_second` calls per second.

    Source: https://gist.github.com/gregburek/1441055
    """
    lock = threading.Lock()
    min_interval = 1.0 / max_per_second
    def decorate(func):
        # Initialized at decoration time, so even the very first call may wait
        last_time_called = time.perf_counter()
        @wraps(func)
        def rate_limited_function(*args, **kwargs):
            # Serialize callers so the interval bookkeeping stays consistent
            lock.acquire()
            nonlocal last_time_called
            try:
                elapsed = time.perf_counter() - last_time_called
                left_to_wait = min_interval - elapsed
                if left_to_wait > 0:
                    time.sleep(left_to_wait)
                return func(*args, **kwargs)
            finally:
                # Stamp the call time and release the lock even if func raised
                last_time_called = time.perf_counter()
                lock.release()
        return rate_limited_function
    return decorate
def timeit(stat_tracker_func, name):
    """
    Decorator factory that times each call of the wrapped function.

    After every call, `stat_tracker_func(name, elapsed_seconds)` is invoked;
    it can print or persist the measurement. The wrapped function's return
    value is passed through unchanged.
    """
    def _timeit(func):
        def wrapper(*args, **kw):
            started = time.time()
            outcome = func(*args, **kw)
            elapsed = time.time() - started
            stat_tracker_func(name, elapsed)
            return outcome
        return wrapper
    return _timeit
####
# Regex
####
# Compile the pattern a single time at import instead of on every call
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
    """Break a proxy url into its component parts.

    :param proxy: url such as `schema://user:password@host:port`
    :return: dict with keys schema/user/password/host/port; unmatched parts
             are None, except port which defaults to '80'
    """
    proxy_parts = {'schema': None,
                   'user': None,
                   'password': None,
                   'host': None,
                   'port': None,
                   }
    match = re.match(proxy_parts_pattern, proxy)
    if match is None:
        logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
    else:
        groups = match.groupdict()
        for part in proxy_parts:
            proxy_parts[part] = groups.get(part)
    if proxy_parts['port'] is None:
        proxy_parts['port'] = '80'
    return proxy_parts
def remove_html_tag(input_str='', tag=None):
    """
    Return `input_str` with every `<tag>...</tag>` section (tag and contents) removed.

    When `tag` is None the input is returned unchanged.
    """
    if tag is None:
        return input_str
    tag_pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
    return re.sub(tag_pattern, '', str(input_str))
|
xtream1101/cutil
|
cutil/__init__.py
|
create_path
|
python
|
def create_path(path, is_dir=False):
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
|
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L275-L297
|
[
"def norm_path(path):\n \"\"\"\n :return: Proper path for os with vars expanded out\n \"\"\"\n # path = os.path.normcase(path)\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.normpath(path)\n return path\n"
] |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
rate_limited
|
python
|
def rate_limited(num_calls=1, every=1.0):
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
|
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time window has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L388-L440
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
rate_limited_old
|
python
|
def rate_limited_old(max_per_second):
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
|
Source: https://gist.github.com/gregburek/1441055
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L443-L470
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
timeit
|
python
|
def timeit(stat_tracker_func, name):
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
|
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L473-L491
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
"""
Take a proxy url and break it up to its parts
"""
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents from a string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
xtream1101/cutil
|
cutil/__init__.py
|
get_proxy_parts
|
python
|
def get_proxy_parts(proxy):
proxy_parts = {'schema': None,
'user': None,
'password': None,
'host': None,
'port': None,
}
# Find parts
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts['port'] is None:
proxy_parts['port'] = '80'
return proxy_parts
|
Take a proxy url and break it up to its parts
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L501-L524
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
    """
    :return: current time as an integer unix epoch timestamp
    """
    now = time.time()
    return int(now)
def get_datetime():
    """
    :return: naive datetime object for the current local time
    """
    return datetime.datetime.today()
def datetime_to_str(timestamp):
    """Format `timestamp` as ISO-8601 with a literal '+0000' suffix appended."""
    return "{}+0000".format(timestamp.isoformat())
def datetime_to_utc(timestamp):
    # Convert `timestamp` to the UTC timezone.
    # NOTE(review): a naive datetime is assumed to be local time by
    # `astimezone` (Python 3.6+) — confirm callers pass tz-aware values
    return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=("%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z")):
    """
    Parse `timestamp` using the first matching strptime format.

    :param timestamp: ISO-ish datetime string, or None
    :param formats: iterable of strptime formats to try, in order
                    (default is now an immutable tuple instead of a mutable
                    list default argument)
    :return: datetime instance, or None if input is None or no format matched
    """
    if timestamp is None:
        return None

    rdata = None
    for time_format in formats:
        try:
            rdata = datetime.datetime.strptime(timestamp, time_format)
        except ValueError:
            rdata = None
        else:
            # Parsed successfully, stop trying further formats
            break
    return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
    """
    Process `data` items concurrently with a pool of daemon worker threads.

    Each item is passed to `callback(item, *args, **kwargs)`; return values
    are collected in completion order (NOT input order). An exception in the
    callback is logged and that item's result is skipped.

    NOTE(review): `item_list.append` relies on the GIL for thread safety,
    and the daemon worker threads are never joined or shut down — they die
    with the process.

    :param num_threads: number of worker threads to start
    :param data: iterable of items to process
    :param callback: callable invoked once per item
    :return: list of callback return values
    """
    q = queue.Queue()
    item_list = []
    def _thread_run():
        # Worker loop: pull items until the process exits (daemon thread)
        while True:
            item = q.get()
            try:
                item_list.append(callback(item, *args, **kwargs))
            except Exception:
                logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
            q.task_done()
    for i in range(num_threads):
        t = threading.Thread(target=_thread_run)
        t.daemon = True
        t.start()
    # Fill the Queue with the data to process
    for item in data:
        q.put(item)
    # Start processing the data; join() blocks until every item is task_done
    q.join()
    return item_list
####
# Other functions
####
def multikey_sort(items, columns):
    """
    Sort a list of dicts by multiple keys; prefix a column name with '-'
    to sort that column descending.

    Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
    """
    key_specs = []
    for col in columns:
        if col.startswith('-'):
            key_specs.append((itemgetter(col[1:].strip()), -1))
        else:
            key_specs.append((itemgetter(col.strip()), 1))

    def compare(left, right):
        # Walk the keys in priority order; first non-equal key decides
        for getter, direction in key_specs:
            a, b = getter(left), getter(right)
            outcome = ((a > b) - (a < b)) * direction
            if outcome:
                return outcome
        return 0

    return sorted(items, key=cmp_to_key(compare))
def get_internal_ip():
    # Resolve this machine's own hostname to an IP address.
    # NOTE(review): may return 127.0.0.1 depending on /etc/hosts configuration
    return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
    """
    Generate a short obfuscated key string using hashids.

    :param value: int to encode; a random 0-999999 value if None
    :param salt: salt for the hashid; a random 0-999999 value if None
    :param size: minimum length of the generated key (non-int falls back to 8)
    :return: hashid string
    """
    if value is None:
        value = random.randint(0, 999999)
    if salt is None:
        salt = random.randint(0, 999999)
    if not isinstance(size, int):
        # Enforce that size must be an int
        size = 8
    hashids = Hashids(salt=str(salt), min_length=size)
    return hashids.encode(value)
def create_uid():
    """Return a new random 32-character hex uid (backed by uuid4)."""
    # Named uid and not uuid because this function does not have to use uuid's
    uid = uuid.uuid4().hex
    logger.debug("Created new uid: {uid}".format(uid=uid))
    return uid
def make_url_safe(string):
    """Percent-encode `string` for use in a URL query (spaces become '+')."""
    return urllib.parse.quote_plus(string)
def sanitize(string):
    """
    Replace characters that are unsafe in file/path names.

    Separators become '-', quoting chars are normalized, '&' expands to
    'and', and several characters are dropped entirely.
    """
    # Single-pass character translation (equivalent to the sequential
    # replace list: none of the replacement outputs are themselves replaced)
    table = str.maketrans({'\\': '-', ':': '-', '/': '-',
                           '?': '', '<': '', '>': '',
                           '`': '`', '|': '-', '*': '`',
                           '"': '\'', '.': '', '&': 'and'})
    return string.translate(table)
def rreplace(s, old, new, occurrence):
    """
    Replace the last `occurrence` instances of `old` in `s` with `new`.

    Taken from: http://stackoverflow.com/a/2556252
    """
    # Split from the right at most `occurrence` times, then rejoin on `new`
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
def flatten(dict_obj, prev_key='', sep='_'):
    """
    Flatten a nested dict into a single-level dict.

    Nested keys are joined with `sep`, e.g. {'a': {'b': 1}} -> {'a_b': 1}.

    :param dict_obj: dict to flatten
    :param prev_key: key prefix used during recursion
    :param sep: separator placed between joined key parts
    :return: new flattened dict
    """
    items = {}
    for key, value in dict_obj.items():
        new_key = prev_key + sep + key if prev_key != '' else key
        if isinstance(value, dict):
            # Bugfix: pass `sep` through the recursive call so a custom
            # separator is honored at every depth (it previously reset to '_')
            items.update(flatten(value, new_key, sep))
        else:
            items[new_key] = value
    return items
def update_dict(d, u):
    """
    Recursively merge dict `u` into dict `d` (modifies and returns `d`).

    Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth

    :param d: dict to update in place
    :param u: dict whose values win on conflicts
    :return: the merged dict `d`
    """
    # Bugfix: `collections.Mapping` was removed in Python 3.10; the ABCs
    # live in collections.abc (imported locally to keep this edit
    # self-contained — module import is cached, so the cost is negligible)
    from collections.abc import Mapping

    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = update_dict(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def get_script_name(ext=False):
    """
    Return the name of the currently running script (from sys.argv[0]).

    :param ext: keep the file extension unless this is exactly False
    :return: script name, e.g. 'myscript' or 'myscript.py'
    """
    name = os.path.basename(sys.argv[0])
    # Identity check on False matches the original behavior exactly
    # (any other falsy value keeps the extension)
    if ext is not False:
        return name
    return name.split('.')[0]
def chunks_of(max_chunk_size, list_to_chunk):
    """
    Yield successive slices of `list_to_chunk`, each at most
    `max_chunk_size` items long.
    """
    for start in range(0, len(list_to_chunk), max_chunk_size):
        end = start + max_chunk_size
        yield list_to_chunk[start:end]
def split_into(max_num_chunks, list_to_chunk):
    """
    Split `list_to_chunk` into at most `max_num_chunks` chunks.

    :param max_num_chunks: maximum number of chunks to produce (>= 1)
    :param list_to_chunk: list to split
    :return: generator yielding the chunks
    """
    # Bugfix: clamp to at least 1 — for an empty list ceil(0 / n) == 0,
    # which made chunks_of raise `ValueError: range() arg 3 must not be zero`
    # once the generator was iterated
    max_chunk_size = max(1, math.ceil(len(list_to_chunk) / max_num_chunks))
    return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
    """Return the extension of `file` including the leading dot ('' if none)."""
    return os.path.splitext(file)[1]
def norm_path(path):
    """
    :return: Proper path for os with vars expanded out
    """
    # Expand '~', then environment variables, then normalize separators —
    # same order as before, collapsed into one expression
    expanded = os.path.expandvars(os.path.expanduser(path))
    return os.path.normpath(expanded)
def create_hashed_path(base_path, name, depth=2):
    """
    Build a nested directory path from the md5 hash of `name`.

    Each nesting level is a 2-hex-char slice of the hash, `depth` levels
    deep (capped at 16 since an md5 digest is 32 hex chars).

    :param base_path: directory the hashed path is rooted at
    :param name: value to hash (converted to str first)
    :param depth: number of nested 2-char directories, max 16
    :return: dict with 'path' (ends with the os separator) and 'hash'
    """
    if depth > 16:
        logger.warning("depth cannot be greater then 16, setting to 16")
        depth = 16
    name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
    save_path = base_path
    if not save_path.endswith(os.path.sep):
        save_path += os.path.sep
    for level in range(depth):
        save_path += name_hash[2 * level:2 * level + 2] + os.path.sep
    return {'path': save_path,
            'hash': name_hash,
            }
def create_path(path, is_dir=False):
    """
    Ensure the directory for `path` exists, creating it if needed.

    :param path: path or file to create directory for
    :param is_dir: pass True if `path` itself is a directory, default = False
    :return: os safe path from `path`
    """
    path = norm_path(path)
    target_dir = path if is_dir else os.path.dirname(path)
    if not os.path.exists(target_dir):
        try:
            os.makedirs(target_dir)
        except OSError:
            # Directory may have been created concurrently (or is not
            # creatable) — swallow, matching previous best-effort behavior
            pass
    return path
def dump_json(file_, data, **kwargs):
    """
    Serialize `data` to a .json file, creating parent directories as needed.

    :param file_: output path; '.json' is appended if missing
    :param data: JSON-serializable object
    :param kwargs: overrides for json.dump (defaults: sort_keys=True, indent=4)
    """
    json_args = {'sort_keys': True, 'indent': 4}
    json_args.update(**kwargs)
    create_path(file_)
    if not file_.endswith('.json'):
        file_ += '.json'
    with open(file_, 'w') as outfile:
        json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
    """
    Extract low/high numeric prices from a free-form price string.

    Uses the module-level `price_pattern` regex to capture up to two numeric
    groups, e.g. "$10.99 - $19.99" -> {'low': 10.99, 'high': 19.99}.
    Thousands separators (',', '.', ' ') are stripped; a ',' or '.' three
    characters from the end is treated as the cents separator.

    :param price: string that contains one or two prices
    :return: dict {'low': float or None, 'high': float or None}
    """
    found_price = {'low': None,
                   'high': None
                   }
    price_raw = re.search(price_pattern, price)
    if price_raw:
        matched = price_raw.groupdict()
        found_price['low'] = matched.get('low')
        found_price['high'] = matched.get('high')
    for key, value in found_price.items():
        if value is not None:
            value = value.strip()
            # Strip every separator; the cents separator (if any) is
            # re-inserted below
            new_value = value.replace(',', '').replace('.', '').replace(' ', '')
            try:
                # Check if price has cents
                # (a ',' or '.' three chars from the end, e.g. '10.99')
                if value[-3] in [',', '.']:
                    # Add . for cents back
                    new_value = new_value[:-2] + '.' + new_value[-2:]
            except IndexError:
                # Price is 99 or less with no cents
                pass
            if new_value != '':
                found_price[key] = float(new_value)
            else:
                found_price[key] = None
    return found_price
def get_image_dimension(url):
    """
    Fetch an image over HTTP and return its pixel dimensions.

    :param url: image url; scheme-relative urls ('//host/...') get 'http:'
                prepended
    :return: dict {'width': int or None, 'height': int or None}
             (both None on any fetch/decode error)
    """
    import requests
    from PIL import Image  # pip install pillow
    from io import BytesIO
    size = {'width': None,
            'height': None,
            }
    try:
        if url.startswith('//'):
            url = "http:" + url
        data = requests.get(url, timeout=15).content
        im = Image.open(BytesIO(data))
        size['width'], size['height'] = im.size
    except Exception:
        # Best-effort: log and return Nones rather than raising
        logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
        logger.warning("Error getting image size {url}".format(url=url))
    return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
    """
    Crop a region from an image and save it to `output_file`.

    :param image_file: source image path (or file object Pillow can open)
    :param output_file: destination path; required
    :param height: crop height in pixels (>= 1); required
    :param width: crop width in pixels (>= 1); required
    :param x: left edge of the crop box; required
    :param y: top edge of the crop box; required
    :return: `output_file` on success, None if any argument is missing
    :raises ValueError: if width or height is less than 1
    """
    from PIL import Image  # pip install pillow
    if output_file is None or height is None or width is None or x is None or y is None:
        logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
        return None
    if width <= 0 or height <= 0:
        logger.warning("Width and height must be 1 or greater")
        raise ValueError
    image = Image.open(image_file)
    # Pillow's crop box is (left, upper, right, lower)
    image.crop((x, y, width + x, height + y)).save(output_file)
    return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
    """
    Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
    Need to make a few changes that included having num_calls be a float
    Prevent a method from being called
    if it was previously called before
    a time widows has elapsed.
    Keyword Arguments:
        num_calls (float): Maximum method invocations within a period. Must be greater than 0.
        every (float): A dampening factor (in seconds). Can be any number greater than 0.
    Return:
        function: Decorated function that will forward method invocations if the time window has elapsed.
    """
    # Minimum number of seconds that must elapse between two calls
    frequency = abs(every) / float(num_calls)
    def decorator(func):
        """
        Extend the behaviour of the following
        function, forwarding method invocations
        if the time window hes elapsed.
        Arguments:
            func (function): The function to decorate
        Returns:
            function: Decorated function
        """
        # To get around issues with function local scope
        # and reassigning variables, we wrap the time
        # within a list. When updating the value we're
        # not reassigning `last_called`, which would not
        # work, but instead reassigning the value at a
        # particular index.
        last_called = [0.0]
        # Add thread safety
        lock = threading.RLock()
        def wrapper(*args, **kargs):
            """Decorator wrapper function"""
            with lock:
                # Sleep off whatever remains of the interval before calling
                elapsed = time.time() - last_called[0]
                left_to_wait = frequency - elapsed
                if left_to_wait > 0:
                    time.sleep(left_to_wait)
                last_called[0] = time.time()
                return func(*args, **kargs)
        return wrapper
    return decorator
def rate_limited_old(max_per_second):
    """
    Decorator limiting the wrapped function to `max_per_second` calls.

    Source: https://gist.github.com/gregburek/1441055
    """
    lock = threading.Lock()
    # Minimum number of seconds between two consecutive calls
    min_interval = 1.0 / max_per_second
    def decorate(func):
        last_time_called = time.perf_counter()
        @wraps(func)
        def rate_limited_function(*args, **kwargs):
            lock.acquire()
            nonlocal last_time_called
            try:
                elapsed = time.perf_counter() - last_time_called
                left_to_wait = min_interval - elapsed
                if left_to_wait > 0:
                    # Sleep off the remainder of the interval while holding
                    # the lock so concurrent callers queue up behind it
                    time.sleep(left_to_wait)
                return func(*args, **kwargs)
            finally:
                last_time_called = time.perf_counter()
                lock.release()
        return rate_limited_function
    return decorate
def timeit(stat_tracker_func, name):
    """
    Pass in a function and the name of the stat
    Will time the function that this is a decorator to and send
    the `name` as well as the value (in seconds) to `stat_tracker_func`
    `stat_tracker_func` can be used to either print out the data or save it

    :param stat_tracker_func: callable taking (name, seconds)
    :param name: stat name to report alongside each timing
    :return: decorator
    """
    def _timeit(func):
        # Bugfix: preserve the wrapped function's __name__/__doc__ etc.
        # (`wraps` is already imported at the top of this module)
        @wraps(func)
        def wrapper(*args, **kw):
            start_time = time.time()
            result = func(*args, **kw)
            stop_time = time.time()
            stat_tracker_func(name, stop_time - start_time)
            return result
        return wrapper
    return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def remove_html_tag(input_str='', tag=None):
    """
    Return `input_str` with every <tag ...>...</tag> element, including its
    contents, removed.

    :param input_str: string (or stringable object) to strip tags from
    :param tag: tag name to remove, e.g. 'script'; input returned unchanged
                if None
    :return: cleaned string
    """
    result = input_str
    if tag is not None:
        # Bugfixes vs the previous pattern '<{tag}[\s\S]+?/{tag}>':
        # - '\b' stops tag='p' from also matching '<pre>' (prefix tags)
        # - the closing match is anchored on a real '</tag>' end tag instead
        #   of any '.../tag>' character run
        # - re.escape keeps regex metacharacters in `tag` from being executed
        pattern = re.compile(r'<{tag}\b[\s\S]*?</{tag}\s*>'.format(tag=re.escape(tag)))
        result = re.sub(pattern, '', str(input_str))
    return result
|
xtream1101/cutil
|
cutil/__init__.py
|
remove_html_tag
|
python
|
def remove_html_tag(input_str='', tag=None):
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result
|
Returns a string with the html tag and all its contents from a string
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L527-L536
| null |
from cutil.database import Database # noqa: F401
from cutil.config import Config # noqa: F401
from cutil.custom_terminal import CustomTerminal # noqa: F401
from cutil.repeating_timer import RepeatingTimer # noqa: F401
import os
import re
import sys
import uuid
import math
import time
import pytz
import json
import queue
import random
import socket
import urllib
import hashlib
import logging
import datetime
import threading
import collections
from hashids import Hashids
from functools import wraps
from operator import itemgetter
from functools import cmp_to_key
logger = logging.getLogger(__name__)
####
# Time related functions
####
def get_epoch():
"""
:return: time as epoch
"""
return int(time.time())
def get_datetime():
"""
:return: datetime object
"""
return datetime.datetime.now()
def datetime_to_str(timestamp):
return timestamp.isoformat() + "+0000"
def datetime_to_utc(timestamp):
return timestamp.astimezone(pytz.timezone('UTC'))
def str_to_date(timestamp, formats=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%dT%H:%M:%S%z"]):
rdata = None
if timestamp is None:
return None
for time_format in formats:
try:
rdata = datetime.datetime.strptime(timestamp, time_format)
except ValueError:
rdata = None
else:
# If we did not raise an exception
break
return rdata
###
# Threading
###
def threads(num_threads, data, callback, *args, **kwargs):
q = queue.Queue()
item_list = []
def _thread_run():
while True:
item = q.get()
try:
item_list.append(callback(item, *args, **kwargs))
except Exception:
logger.exception("Error in _thread_run callback {} with item:\n{}".format(callback.__name__, item))
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=_thread_run)
t.daemon = True
t.start()
# Fill the Queue with the data to process
for item in data:
q.put(item)
# Start processing the data
q.join()
return item_list
####
# Other functions
####
def multikey_sort(items, columns):
"""Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
"""
comparers = [
((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1))
for col in columns
]
def cmp(a, b):
return (a > b) - (a < b)
def comparer(left, right):
comparer_iter = (
cmp(fn(left), fn(right)) * mult
for fn, mult in comparers
)
return next((result for result in comparer_iter if result), 0)
return sorted(items, key=cmp_to_key(comparer))
def get_internal_ip():
return socket.gethostbyname(socket.gethostname())
def generate_key(value=None, salt=None, size=8):
if value is None:
value = random.randint(0, 999999)
if salt is None:
salt = random.randint(0, 999999)
if not isinstance(size, int):
# Enforce that size must be an int
size = 8
hashids = Hashids(salt=str(salt), min_length=size)
return hashids.encode(value)
def create_uid():
# Named uid and not uuid because this function does not have to use uuid's
uid = uuid.uuid4().hex
logger.debug("Created new uid: {uid}".format(uid=uid))
return uid
def make_url_safe(string):
# Convert special chars to % chars
return urllib.parse.quote_plus(string)
def sanitize(string):
"""
Catch and replace invalid path chars
[replace, with]
"""
replace_chars = [
['\\', '-'], [':', '-'], ['/', '-'],
['?', ''], ['<', ''], ['>', ''],
['`', '`'], ['|', '-'], ['*', '`'],
['"', '\''], ['.', ''], ['&', 'and']
]
for ch in replace_chars:
string = string.replace(ch[0], ch[1])
return string
def rreplace(s, old, new, occurrence):
"""
Taken from: http://stackoverflow.com/a/2556252
"""
li = s.rsplit(old, occurrence)
return new.join(li)
def flatten(dict_obj, prev_key='', sep='_'):
items = {}
for key, value in dict_obj.items():
new_key = prev_key + sep + key if prev_key != '' else key
if isinstance(value, dict):
items.update(flatten(value, new_key))
else:
items[new_key] = value
return items
def update_dict(d, u):
"""
Source: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def get_script_name(ext=False):
name = os.path.basename(sys.argv[0])
if ext is False:
name = name.split('.')[0]
return name
def chunks_of(max_chunk_size, list_to_chunk):
"""
Yields the list with a max size of max_chunk_size
"""
for i in range(0, len(list_to_chunk), max_chunk_size):
yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
"""
Yields the list with a max total size of max_num_chunks
"""
max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
return chunks_of(max_chunk_size, list_to_chunk)
####
# File/filesystem related function
####
def get_file_ext(file):
file_name, file_extension = os.path.splitext(file)
return file_extension
def norm_path(path):
"""
:return: Proper path for os with vars expanded out
"""
# path = os.path.normcase(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def create_hashed_path(base_path, name, depth=2):
"""
Create a directory structure using the hashed filename
:return: string of the path to save to not including filename/ext
"""
if depth > 16:
logger.warning("depth cannot be greater then 16, setting to 16")
depth = 16
name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
if base_path.endswith(os.path.sep):
save_path = base_path
else:
save_path = base_path + os.path.sep
for i in range(1, depth + 1):
end = i * 2
start = end - 2
save_path += name_hash[start:end] + os.path.sep
return {'path': save_path,
'hash': name_hash,
}
def create_path(path, is_dir=False):
"""
Check if path exists, if not create it
:param path: path or file to create directory for
:param is_dir: pass True if we are passing in a directory, default = False
:return: os safe path from `path`
"""
path = norm_path(path)
path_check = path
if not is_dir:
path_check = os.path.dirname(path)
does_path_exists = os.path.exists(path_check)
if does_path_exists:
return path
try:
os.makedirs(path_check)
except OSError:
pass
return path
def dump_json(file_, data, **kwargs):
json_args = {'sort_keys': True,
'indent': 4}
json_args.update(**kwargs)
create_path(file_)
if not file_.endswith('.json'):
file_ += '.json'
with open(file_, 'w') as outfile:
json.dump(data, outfile, **json_args)
# Lets only do this once
price_pattern = re.compile('(?P<low>[\d,.\s]+)(?:\D*(?P<high>[\d,.\s]+))?')
def parse_price(price):
found_price = {'low': None,
'high': None
}
price_raw = re.search(price_pattern, price)
if price_raw:
matched = price_raw.groupdict()
found_price['low'] = matched.get('low')
found_price['high'] = matched.get('high')
for key, value in found_price.items():
if value is not None:
value = value.strip()
new_value = value.replace(',', '').replace('.', '').replace(' ', '')
try:
# Check if price has cents
if value[-3] in [',', '.']:
# Add . for cents back
new_value = new_value[:-2] + '.' + new_value[-2:]
except IndexError:
# Price is 99 or less with no cents
pass
if new_value != '':
found_price[key] = float(new_value)
else:
found_price[key] = None
return found_price
def get_image_dimension(url):
import requests
from PIL import Image # pip install pillow
from io import BytesIO
size = {'width': None,
'height': None,
}
try:
if url.startswith('//'):
url = "http:" + url
data = requests.get(url, timeout=15).content
im = Image.open(BytesIO(data))
size['width'], size['height'] = im.size
except Exception:
logger.debug("Error getting image size {url}".format(url=url), exc_info=True)
logger.warning("Error getting image size {url}".format(url=url))
return size
def crop_image(image_file, output_file=None, height=None, width=None, x=None, y=None):
from PIL import Image # pip install pillow
if output_file is None or height is None or width is None or x is None or y is None:
logger.error("Must pass in all params: output_file, height, width, x, and y as named args")
return None
if width <= 0 or height <= 0:
logger.warning("Width and height must be 1 or greater")
raise ValueError
image = Image.open(image_file)
image.crop((x, y, width + x, height + y)).save(output_file)
return output_file
####
# Decorators
####
def rate_limited(num_calls=1, every=1.0):
"""
Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
Need to make a few changes that included having num_calls be a float
Prevent a method from being called
if it was previously called before
a time widows has elapsed.
Keyword Arguments:
num_calls (float): Maximum method invocations within a period. Must be greater than 0.
every (float): A dampening factor (in seconds). Can be any number greater than 0.
Return:
function: Decorated function that will forward method invocations if the time window has elapsed.
"""
frequency = abs(every) / float(num_calls)
def decorator(func):
"""
Extend the behaviour of the following
function, forwarding method invocations
if the time window hes elapsed.
Arguments:
func (function): The function to decorate
Returns:
function: Decorated function
"""
# To get around issues with function local scope
# and reassigning variables, we wrap the time
# within a list. When updating the value we're
# not reassigning `last_called`, which would not
# work, but instead reassigning the value at a
# particular index.
last_called = [0.0]
# Add thread safety
lock = threading.RLock()
def wrapper(*args, **kargs):
"""Decorator wrapper function"""
with lock:
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
last_called[0] = time.time()
return func(*args, **kargs)
return wrapper
return decorator
def rate_limited_old(max_per_second):
"""
Source: https://gist.github.com/gregburek/1441055
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit
####
# Regex
####
# Keep re.compile's outside of fn as to only create it once
proxy_parts_pattern = re.compile('^(?:(?P<schema>\w+):\/\/)(?:(?P<user>.*):(?P<password>.*)@)?(?P<host>[^:]*)(?::(?P<port>\d+))?$')
def get_proxy_parts(proxy):
    """
    Take a proxy url and break it up to its parts

    :param proxy: proxy url, e.g. 'http://user:pass@host:8080'
    :return: dict with keys schema, user, password, host, port — strings or
             None; port defaults to '80' (string) when absent
    """
    proxy_parts = {'schema': None,
                   'user': None,
                   'password': None,
                   'host': None,
                   'port': None,
                   }
    # Find parts (module-level compiled `proxy_parts_pattern`)
    results = re.match(proxy_parts_pattern, proxy)
    if results:
        matched = results.groupdict()
        for key in proxy_parts:
            proxy_parts[key] = matched.get(key)
    else:
        logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
    if proxy_parts['port'] is None:
        # Assume the default HTTP port when none was supplied
        proxy_parts['port'] = '80'
    return proxy_parts
|
stephrdev/django-tapeforms
|
tapeforms/utils.py
|
join_css_class
|
python
|
def join_css_class(css_class, *additional_css_classes):
css_set = set(chain.from_iterable(
c.split(' ') for c in [css_class, *additional_css_classes] if c))
return ' '.join(css_set)
|
Returns the union of one or more CSS classes as a space-separated string.
Note that the order will not be preserved.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/utils.py#L4-L11
| null |
from itertools import chain
|
stephrdev/django-tapeforms
|
tapeforms/fieldsets.py
|
TapeformFieldset.visible_fields
|
python
|
def visible_fields(self):
form_visible_fields = self.form.visible_fields()
if self.render_fields:
fields = self.render_fields
else:
fields = [field.name for field in form_visible_fields]
filtered_fields = [field for field in fields if field not in self.exclude_fields]
return [field for field in form_visible_fields if field.name in filtered_fields]
|
Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ exlcudes
all fields from the ``exclude`` configuration.
If no ``fields`` where provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/fieldsets.py#L76-L97
| null |
class TapeformFieldset(TapeformLayoutMixin, object):
"""
Class to render a subset of a form's fields. From a template perspective,
a fieldset looks quite similar to a form (and can use the same template tag
to render: ``form``.
"""
def __init__(
self, form, fields=None, exclude=None, primary=False, template=None,
extra=None
):
"""
Initializes a fieldset instance to be used like a form in a template.
Just like in ModelForm Meta, you have to provide at least a list of
fields to render in this fieldset or a list of fields to exclude.
If you provide both, exclusions have a higher priority.
:param form: The form instance to take fields from.
:param fields: A list of visible field names to include in this fieldset.
:param exclude: A list of visible fields to _not_ include in this fieldset.
:param primary: If the fieldset is `primary`, this fieldset is responsible
for rendering the hidden fields and non field errors.
:param template: You can provide an alternative layout template to use.
:param extra: This argument is carried around with the fieldset and is also
available in the template. Useful to pass some special arguments
for rendering around (like a fieldset headline.
:return: A configured fieldset instance.
"""
assert fields or exclude is not None, 'Please provide fields or exclude argument.'
self.form = form
self.render_fields = fields or ()
self.exclude_fields = exclude or ()
self.primary_fieldset = primary
self.extra = extra or {}
if template:
self.layout_template = template
def __repr__(self):
return '<{cls} form={form}, primary={primary}, fields=({fields})/({exclude})>'.format(
cls=self.__class__.__name__,
form=repr(self.form),
primary=self.primary_fieldset,
fields=';'.join(self.render_fields),
exclude=';'.join(self.exclude_fields),
)
def hidden_fields(self):
"""
Returns the hidden fields of the form for rendering of the fieldset is
marked as the primary fieldset.
:return: List of bound field instances or empty tuple.
"""
return self.form.hidden_fields() if self.primary_fieldset else ()
def non_field_errors(self):
"""
Returns all non-field errors of the form for rendering of the fieldset is
marked as the primary fieldset.
:return: ErrorList instance with non field errors or empty ErrorList.
"""
return self.form.non_field_errors() if self.primary_fieldset else ErrorList()
|
stephrdev/django-tapeforms
|
tapeforms/fieldsets.py
|
TapeformFieldsetsMixin.get_fieldsets
|
python
|
def get_fieldsets(self, fieldsets=None):
fieldsets = fieldsets or self.fieldsets
if not fieldsets:
raise StopIteration
# Search for primary marker in at least one of the fieldset kwargs.
has_primary = any(fieldset.get('primary') for fieldset in fieldsets)
for fieldset_kwargs in fieldsets:
fieldset_kwargs = copy.deepcopy(fieldset_kwargs)
fieldset_kwargs['form'] = self
if not has_primary:
fieldset_kwargs['primary'] = True
has_primary = True
yield self.get_fieldset(**fieldset_kwargs)
|
This method returns a generator which yields fieldset instances.
The method uses the optional fieldsets argument to generate fieldsets for.
If no fieldsets argument is passed, the class property ``fieldsets`` is used.
When generating the fieldsets, the method ensures that at least one fielset
will be the primary fieldset which is responsible for rendering the non field
errors and hidden fields.
:param fieldsets: Alternative set of fieldset kwargs. If passed this set is
prevered of the ``fieldsets`` property of the form.
:return: generator which yields fieldset instances.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/fieldsets.py#L132-L163
| null |
class TapeformFieldsetsMixin:
"""
Mixin to generate fieldsets based on the `fieldsets` property of a
``TapeformFieldsetsMixin`` enabled form.
"""
#: Default fieldset class to use when instantiating a fieldset.
fieldset_class = TapeformFieldset
#: List/tuple of kwargs as `dict`` to generate fieldsets for.
fieldsets = None
def get_fieldset_class(self, **fieldset_kwargs):
"""
Returns the fieldset class to use when generating the fieldset using
the passed fieldset kwargs.
:param fieldset_kwargs: ``dict`` with the fieldset config from ``fieldsets``
:return: Class to use when instantiating the fieldset.
"""
return self.fieldset_class
def get_fieldset(self, **fieldset_kwargs):
"""
Returns a fieldset instance for the passed ``fieldset_kwargs``.
:param fieldset_kwargs: ``dict`` with the fieldset config from ``fieldsets``
:return: Fieldset instance
"""
cls = self.get_fieldset_class(**fieldset_kwargs)
return cls(**fieldset_kwargs)
|
stephrdev/django-tapeforms
|
tapeforms/templatetags/tapeforms.py
|
form
|
python
|
def form(context, form, **kwargs):
if not isinstance(form, (forms.BaseForm, TapeformFieldset)):
raise template.TemplateSyntaxError(
'Provided form should be a `Form` instance, actual type: {0}'.format(
form.__class__.__name__))
return render_to_string(
form.get_layout_template(kwargs.get('using', None)),
form.get_layout_context(),
)
|
The `form` template tag will render a tape-form enabled form using the template
provided by `get_layout_template` method of the form using the context generated
by `get_layout_context` method of the form.
Usage::
{% load tapeforms %}
{% form my_form %}
You can override the used layout template using the keyword argument `using`::
{% load tapeforms %}
{% form my_form using='other_form_layout_template.html' %}
:param form: The Django form to render.
:return: Rendered form (errors + hidden fields + fields) as HTML.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/templatetags/tapeforms.py#L11-L39
| null |
from django import forms, template
from django.template.loader import render_to_string
from ..fieldsets import TapeformFieldset
register = template.Library()
@register.simple_tag(takes_context=True)
@register.simple_tag(takes_context=True)
def formfield(context, bound_field, **kwargs):
"""
The `formfield` template tag will render a form field of a tape-form enabled form
using the template provided by `get_field_template` method of the form together with
the context generated by `get_field_context` method of the form.
Usage::
{% load tapeforms %}
{% formfield my_form.my_field %}
You can override the used field template using the keyword argument `using`::
{% load tapeforms %}
{% formfield my_form.my_field using='other_field_template.html' %}
:param bound_field: The `BoundField` from a Django form to render.
:return: Rendered field (label + widget + other stuff) as HTML.
"""
if not isinstance(bound_field, forms.BoundField):
raise template.TemplateSyntaxError(
'Provided field should be a `BoundField` instance, actual type: {0}'.format(
bound_field.__class__.__name__))
return render_to_string(
bound_field.form.get_field_template(bound_field, kwargs.get('using', None)),
bound_field.form.get_field_context(bound_field),
)
|
stephrdev/django-tapeforms
|
tapeforms/templatetags/tapeforms.py
|
formfield
|
python
|
def formfield(context, bound_field, **kwargs):
if not isinstance(bound_field, forms.BoundField):
raise template.TemplateSyntaxError(
'Provided field should be a `BoundField` instance, actual type: {0}'.format(
bound_field.__class__.__name__))
return render_to_string(
bound_field.form.get_field_template(bound_field, kwargs.get('using', None)),
bound_field.form.get_field_context(bound_field),
)
|
The `formfield` template tag will render a form field of a tape-form enabled form
using the template provided by `get_field_template` method of the form together with
the context generated by `get_field_context` method of the form.
Usage::
{% load tapeforms %}
{% formfield my_form.my_field %}
You can override the used field template using the keyword argument `using`::
{% load tapeforms %}
{% formfield my_form.my_field using='other_field_template.html' %}
:param bound_field: The `BoundField` from a Django form to render.
:return: Rendered field (label + widget + other stuff) as HTML.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/templatetags/tapeforms.py#L43-L71
| null |
from django import forms, template
from django.template.loader import render_to_string
from ..fieldsets import TapeformFieldset
register = template.Library()
@register.simple_tag(takes_context=True)
def form(context, form, **kwargs):
"""
The `form` template tag will render a tape-form enabled form using the template
provided by `get_layout_template` method of the form using the context generated
by `get_layout_context` method of the form.
Usage::
{% load tapeforms %}
{% form my_form %}
You can override the used layout template using the keyword argument `using`::
{% load tapeforms %}
{% form my_form using='other_form_layout_template.html' %}
:param form: The Django form to render.
:return: Rendered form (errors + hidden fields + fields) as HTML.
"""
if not isinstance(form, (forms.BaseForm, TapeformFieldset)):
raise template.TemplateSyntaxError(
'Provided form should be a `Form` instance, actual type: {0}'.format(
form.__class__.__name__))
return render_to_string(
form.get_layout_template(kwargs.get('using', None)),
form.get_layout_context(),
)
@register.simple_tag(takes_context=True)
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformLayoutMixin.get_layout_template
|
python
|
def get_layout_template(self, template_name=None):
if template_name:
return template_name
if self.layout_template:
return self.layout_template
return defaults.LAYOUT_DEFAULT_TEMPLATE
|
Returns the layout template to use when rendering the form to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Form class property `layout_template`
3. Globally defined default template from `defaults.LAYOUT_DEFAULT_TEMPLATE`
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L18-L37
| null |
class TapeformLayoutMixin:
"""
Mixin to render a form of fieldset as HTML.
"""
#: Layout template to use when rendering the form. Optional.
layout_template = None
def get_layout_context(self):
"""
Returns the context which is used when rendering the form to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* errors: `ErrorList` instance with non field errors and hidden field errors
* hidden_fields: All hidden fields to render.
* visible_fields: All visible fields to render.
:return: Template context for form rendering.
"""
errors = self.non_field_errors()
for field in self.hidden_fields():
errors.extend(field.errors)
return {
'form': self,
'errors': errors,
'hidden_fields': self.hidden_fields(),
'visible_fields': self.visible_fields(),
}
def as_tapeform(self):
"""
Shortcut to render the form as a "tapeform" without including the tapeforms
templatetags. Behaves similar to `as_p` and `as_table`.
"""
return render_to_string(
self.get_layout_template(), self.get_layout_context())
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformLayoutMixin.get_layout_context
|
python
|
def get_layout_context(self):
errors = self.non_field_errors()
for field in self.hidden_fields():
errors.extend(field.errors)
return {
'form': self,
'errors': errors,
'hidden_fields': self.hidden_fields(),
'visible_fields': self.visible_fields(),
}
|
Returns the context which is used when rendering the form to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* errors: `ErrorList` instance with non field errors and hidden field errors
* hidden_fields: All hidden fields to render.
* visible_fields: All visible fields to render.
:return: Template context for form rendering.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L39-L61
| null |
class TapeformLayoutMixin:
"""
Mixin to render a form of fieldset as HTML.
"""
#: Layout template to use when rendering the form. Optional.
layout_template = None
def get_layout_template(self, template_name=None):
"""
Returns the layout template to use when rendering the form to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Form class property `layout_template`
3. Globally defined default template from `defaults.LAYOUT_DEFAULT_TEMPLATE`
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form.
"""
if template_name:
return template_name
if self.layout_template:
return self.layout_template
return defaults.LAYOUT_DEFAULT_TEMPLATE
def as_tapeform(self):
"""
Shortcut to render the form as a "tapeform" without including the tapeforms
templatetags. Behaves similar to `as_p` and `as_table`.
"""
return render_to_string(
self.get_layout_template(), self.get_layout_context())
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.full_clean
|
python
|
def full_clean(self, *args, **kwargs):
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
|
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L119-L127
|
[
"def apply_widget_invalid_options(self, field_name):\n \"\"\"\n Applies additional widget options for an invalid field.\n\n This method is called when there is some error on a field to apply\n additional options on its widget. It does the following:\n\n * Sets the aria-invalid property of the widget for accessibility.\n * Adds an invalid CSS class, which is determined by the returned value\n of `get_widget_invalid_css_class` method. If a CSS class is returned,\n it is appended to the current value of the class property of the widget.\n\n :param field_name: A field name of the form.\n \"\"\"\n field = self.fields[field_name]\n class_name = self.get_widget_invalid_css_class(field_name, field)\n\n if class_name:\n field.widget.attrs['class'] = join_css_class(\n field.widget.attrs.get('class', None), class_name)\n\n field.widget.attrs['aria-invalid'] = 'true'\n"
] |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.get_field_template
|
python
|
def get_field_template(self, bound_field, template_name=None):
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
|
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L129-L161
| null |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.get_field_label_css_class
|
python
|
def get_field_label_css_class(self, bound_field):
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
|
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L174-L191
|
[
"def join_css_class(css_class, *additional_css_classes):\n \"\"\"\n Returns the union of one or more CSS classes as a space-separated string.\n Note that the order will not be preserved.\n \"\"\"\n css_set = set(chain.from_iterable(\n c.split(' ') for c in [css_class, *additional_css_classes] if c))\n return ' '.join(css_set)\n"
] |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.get_field_context
|
python
|
def get_field_context(self, bound_field):
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
|
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L193-L237
|
[
"def get_field_container_css_class(self, bound_field):\n \"\"\"\n Returns the container CSS class to use when rendering a field template.\n\n By default, returns the Form class property `field_container_css_class`.\n\n :param bound_field: `BoundField` instance to return CSS class for.\n :return: A CSS class string.\n \"\"\"\n return self.field_container_css_class or None\n",
"def get_field_label_css_class(self, bound_field):\n \"\"\"\n Returns the optional label CSS class to use when rendering a field template.\n\n By default, returns the Form class property `field_label_css_class`. If the\n field has errors and the Form class property `field_label_invalid_css_class`\n is defined, its value is appended to the CSS class.\n\n :param bound_field: `BoundField` instance to return CSS class for.\n :return: A CSS class string or `None`\n \"\"\"\n class_name = self.field_label_css_class\n\n if bound_field.errors and self.field_label_invalid_css_class:\n class_name = join_css_class(\n class_name, self.field_label_invalid_css_class)\n\n return class_name or None\n"
] |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.apply_widget_options
|
python
|
def apply_widget_options(self, field_name):
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
|
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L239-L255
| null |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.apply_widget_template
|
python
|
def apply_widget_template(self, field_name):
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
|
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L257-L271
|
[
"def get_widget_template(self, field_name, field):\n \"\"\"\n Returns the optional widget template to use when rendering the widget\n for a form field.\n\n Preference of template selection:\n 1. Template from `widget_template_overrides` selected by field name\n 2. Template from `widget_template_overrides` selected by widget class\n\n By default, returns `None` which means \"use Django's default widget template\".\n\n :param field_name: The field name to select a widget template for.\n :param field: `Field` instance to return a widget template.\n :return: Template name to use when rendering the widget or `None`\n \"\"\"\n templates = self.widget_template_overrides or {}\n\n template_name = templates.get(field_name, None)\n if template_name:\n return template_name\n\n template_name = templates.get(field.widget.__class__, None)\n if template_name:\n return template_name\n\n return None\n"
] |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.get_widget_template
|
python
|
def get_widget_template(self, field_name, field):
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
|
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L273-L298
| null |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.apply_widget_css_class
|
python
|
def apply_widget_css_class(self, field_name):
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
|
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L300-L315
|
[
"def join_css_class(css_class, *additional_css_classes):\n \"\"\"\n Returns the union of one or more CSS classes as a space-separated string.\n Note that the order will not be preserved.\n \"\"\"\n css_set = set(chain.from_iterable(\n c.split(' ') for c in [css_class, *additional_css_classes] if c))\n return ' '.join(css_set)\n",
"def get_widget_css_class(self, field_name, field):\n \"\"\"\n Returns the optional widget CSS class to use when rendering the\n form's field widget.\n\n By default, returns `None` which means \"no CSS class / no change\".\n\n :param field_name: The field name of the corresponding field for the widget.\n :param field: `Field` instance to return CSS class for.\n :return: A CSS class string or `None`\n \"\"\"\n return self.widget_css_class or None\n"
] |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def apply_widget_invalid_options(self, field_name):
"""
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/mixins.py
|
TapeformMixin.apply_widget_invalid_options
|
python
|
def apply_widget_invalid_options(self, field_name):
field = self.fields[field_name]
class_name = self.get_widget_invalid_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
field.widget.attrs['aria-invalid'] = 'true'
|
Applies additional widget options for an invalid field.
This method is called when there is some error on a field to apply
additional options on its widget. It does the following:
* Sets the aria-invalid property of the widget for accessibility.
* Adds an invalid CSS class, which is determined by the returned value
of `get_widget_invalid_css_class` method. If a CSS class is returned,
it is appended to the current value of the class property of the widget.
:param field_name: A field name of the form.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/mixins.py#L330-L351
|
[
"def join_css_class(css_class, *additional_css_classes):\n \"\"\"\n Returns the union of one or more CSS classes as a space-separated string.\n Note that the order will not be preserved.\n \"\"\"\n css_set = set(chain.from_iterable(\n c.split(' ') for c in [css_class, *additional_css_classes] if c))\n return ' '.join(css_set)\n",
"def get_widget_invalid_css_class(self, field_name, field):\n \"\"\"\n Returns the optional widget CSS class to append when rendering the\n form's field widget in case of error.\n\n By default, returns `None` which means \"no CSS class / no change\".\n\n :param field_name: The field name of the corresponding field for the widget.\n :param field: `Field` instance to return CSS class for.\n :return: A CSS class string or `None`\n \"\"\"\n return self.widget_invalid_css_class or None\n"
] |
class TapeformMixin(TapeformLayoutMixin):
"""
Mixin to extend the forms capability to render itself as HTML output.
(using the template tags provided by `tapeforms`).
"""
#: Field template to use when rendering a bound form-field. Optional.
field_template = None
#: A dictionary of form-field names and/or form-field classes to override
#: the field template which is used when rendering a certain form-field.
#: Optional.
field_template_overrides = None
#: The CSS class to apply to the form-field container element.
field_container_css_class = 'form-field'
#: CSS class to append to the rendered field label tag. Optional.
field_label_css_class = None
#: An additional CSS class to append to the rendered field label tag when
#: the field has errors. Optional.
field_label_invalid_css_class = None
#: A dictionary of form-field names and/or widget classes to override
#: the widget template which is used when rendering a certain form-field.
#: Optional.
widget_template_overrides = None
#: CSS class to append to the widget attributes. Optional.
widget_css_class = None
#: An additional CSS class to append to the widget attributes when the field
#: has errors. Optional.
widget_invalid_css_class = None
def __init__(self, *args, **kwargs):
"""
The init method is overwritten to apply widget templates and CSS classes.
"""
super().__init__(*args, **kwargs)
for field_name in self.fields:
self.apply_widget_options(field_name)
self.apply_widget_template(field_name)
self.apply_widget_css_class(field_name)
def full_clean(self, *args, **kwargs):
"""
The full_clean method is hijacked to apply special treatment to invalid
field inputs. For example adding extra options/classes to widgets.
"""
super().full_clean(*args, **kwargs)
for field in self.errors:
if field != NON_FIELD_ERRORS:
self.apply_widget_invalid_options(field)
def get_field_template(self, bound_field, template_name=None):
"""
Returns the field template to use when rendering a form field to HTML.
Preference of template selection:
1. Provided method argument `template_name`
2. Template from `field_template_overrides` selected by field name
3. Template from `field_template_overrides` selected by field class
4. Form class property `field_template`
5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`
:param bound_field: `BoundField` instance to select a template for.
:param template_name: Optional template to use instead of other configurations.
:return: Template name to use when rendering the form field.
"""
if template_name:
return template_name
templates = self.field_template_overrides or {}
template_name = templates.get(bound_field.name, None)
if template_name:
return template_name
template_name = templates.get(bound_field.field.__class__, None)
if template_name:
return template_name
if self.field_template:
return self.field_template
return defaults.FIELD_DEFAULT_TEMPLATE
def get_field_container_css_class(self, bound_field):
"""
Returns the container CSS class to use when rendering a field template.
By default, returns the Form class property `field_container_css_class`.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string.
"""
return self.field_container_css_class or None
def get_field_label_css_class(self, bound_field):
"""
Returns the optional label CSS class to use when rendering a field template.
By default, returns the Form class property `field_label_css_class`. If the
field has errors and the Form class property `field_label_invalid_css_class`
is defined, its value is appended to the CSS class.
:param bound_field: `BoundField` instance to return CSS class for.
:return: A CSS class string or `None`
"""
class_name = self.field_label_css_class
if bound_field.errors and self.field_label_invalid_css_class:
class_name = join_css_class(
class_name, self.field_label_invalid_css_class)
return class_name or None
def get_field_context(self, bound_field):
"""
Returns the context which is used when rendering a form field to HTML.
The generated template context will contain the following variables:
* form: `Form` instance
* field: `BoundField` instance of the field
* field_id: Field ID to use in `<label for="..">`
* field_name: Name of the form field to render
* errors: `ErrorList` instance with errors of the field
* required: Boolean flag to signal if the field is required or not
* label: The label text of the field
* label_css_class: The optional label CSS class, might be `None`
* help_text: Optional help text for the form field. Might be `None`
* container_css_class: The CSS class for the field container.
* widget_class_name: Lowercased version of the widget class name (e.g. 'textinput')
* widget_input_type: `input_type` property of the widget instance,
falls back to `widget_class_name` if not available.
:return: Template context for field rendering.
"""
widget = bound_field.field.widget
widget_class_name = widget.__class__.__name__.lower()
# Check if we have an overwritten id in widget attrs,
# if not use auto_id of bound field.
field_id = widget.attrs.get('id') or bound_field.auto_id
if field_id:
field_id = widget.id_for_label(field_id)
return {
'form': self,
'field': bound_field,
'field_id': field_id,
'field_name': bound_field.name,
'errors': bound_field.errors,
'required': bound_field.field.required,
'label': bound_field.label,
'label_css_class': self.get_field_label_css_class(bound_field),
'help_text': mark_safe(bound_field.help_text) if bound_field.help_text else None,
'container_css_class': self.get_field_container_css_class(bound_field),
'widget_class_name': widget_class_name,
'widget_input_type': getattr(widget, 'input_type', None) or widget_class_name
}
def apply_widget_options(self, field_name):
"""
Applies additional widget options like changing the input type of DateInput
and TimeInput to "date" / "time" to enable Browser date pickers or other
attributes/properties.
"""
widget = self.fields[field_name].widget
if isinstance(widget, forms.DateInput):
widget.input_type = 'date'
if isinstance(widget, forms.TimeInput):
widget.input_type = 'time'
if isinstance(widget, forms.SplitDateTimeWidget):
widget.widgets[0].input_type = 'date'
widget.widgets[1].input_type = 'time'
def apply_widget_template(self, field_name):
"""
Applies widget template overrides if available.
The method uses the `get_widget_template` method to determine if the widget
template should be exchanged. If a template is available, the template_name
property of the widget instance is updated.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
template_name = self.get_widget_template(field_name, field)
if template_name:
field.widget.template_name = template_name
def get_widget_template(self, field_name, field):
"""
Returns the optional widget template to use when rendering the widget
for a form field.
Preference of template selection:
1. Template from `widget_template_overrides` selected by field name
2. Template from `widget_template_overrides` selected by widget class
By default, returns `None` which means "use Django's default widget template".
:param field_name: The field name to select a widget template for.
:param field: `Field` instance to return a widget template.
:return: Template name to use when rendering the widget or `None`
"""
templates = self.widget_template_overrides or {}
template_name = templates.get(field_name, None)
if template_name:
return template_name
template_name = templates.get(field.widget.__class__, None)
if template_name:
return template_name
return None
def apply_widget_css_class(self, field_name):
"""
Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form.
"""
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name)
def get_widget_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to use when rendering the
form's field widget.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_css_class or None
def get_widget_invalid_css_class(self, field_name, field):
"""
Returns the optional widget CSS class to append when rendering the
form's field widget in case of error.
By default, returns `None` which means "no CSS class / no change".
:param field_name: The field name of the corresponding field for the widget.
:param field: `Field` instance to return CSS class for.
:return: A CSS class string or `None`
"""
return self.widget_invalid_css_class or None
|
stephrdev/django-tapeforms
|
tapeforms/contrib/foundation.py
|
FoundationTapeformMixin.get_field_template
|
python
|
def get_field_template(self, bound_field, template_name=None):
template_name = super().get_field_template(bound_field, template_name)
if (template_name == self.field_template and
isinstance(bound_field.field.widget, (
forms.RadioSelect, forms.CheckboxSelectMultiple))):
return 'tapeforms/fields/foundation_fieldset.html'
return template_name
|
Uses a special field template for widget with multiple inputs. It only
applies if no other template than the default one has been defined.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/contrib/foundation.py#L27-L39
|
[
"def get_field_template(self, bound_field, template_name=None):\n \"\"\"\n Returns the field template to use when rendering a form field to HTML.\n\n Preference of template selection:\n\n 1. Provided method argument `template_name`\n 2. Template from `field_template_overrides` selected by field name\n 3. Template from `field_template_overrides` selected by field class\n 4. Form class property `field_template`\n 5. Globally defined default template from `defaults.LAYOUT_FIELD_TEMPLATE`\n\n :param bound_field: `BoundField` instance to select a template for.\n :param template_name: Optional template to use instead of other configurations.\n :return: Template name to use when rendering the form field.\n \"\"\"\n if template_name:\n return template_name\n\n templates = self.field_template_overrides or {}\n\n template_name = templates.get(bound_field.name, None)\n if template_name:\n return template_name\n\n template_name = templates.get(bound_field.field.__class__, None)\n if template_name:\n return template_name\n\n if self.field_template:\n return self.field_template\n\n return defaults.FIELD_DEFAULT_TEMPLATE\n"
] |
class FoundationTapeformMixin(TapeformMixin):
"""
Tapeform Mixin to render Foundation compatible forms.
(using the template tags provided by `tapeforms`).
"""
#: Use a special layout template for Foundation compatible forms.
layout_template = 'tapeforms/layouts/foundation.html'
#: Use a special field template for Foundation compatible forms.
field_template = 'tapeforms/fields/foundation.html'
#: Use a special class to invalid field's label.
field_label_invalid_css_class = 'is-invalid-label'
#: Use a special class to invalid field's widget.
widget_invalid_css_class = 'is-invalid-input'
#: Widgets with multiple inputs require some extra care (don't use ul, etc.)
widget_template_overrides = {
forms.RadioSelect: 'tapeforms/widgets/foundation_multipleinput.html',
forms.CheckboxSelectMultiple: 'tapeforms/widgets/foundation_multipleinput.html'
}
|
stephrdev/django-tapeforms
|
tapeforms/contrib/bootstrap.py
|
BootstrapTapeformMixin.get_field_label_css_class
|
python
|
def get_field_label_css_class(self, bound_field):
# If we render CheckboxInputs, Bootstrap requires a different
# field label css class for checkboxes.
if isinstance(bound_field.field.widget, forms.CheckboxInput):
return 'form-check-label'
return super().get_field_label_css_class(bound_field)
|
Returns 'form-check-label' if widget is CheckboxInput. For all other fields,
no css class is added.
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/contrib/bootstrap.py#L43-L53
|
[
"def get_field_label_css_class(self, bound_field):\n \"\"\"\n Returns the optional label CSS class to use when rendering a field template.\n\n By default, returns the Form class property `field_label_css_class`. If the\n field has errors and the Form class property `field_label_invalid_css_class`\n is defined, its value is appended to the CSS class.\n\n :param bound_field: `BoundField` instance to return CSS class for.\n :return: A CSS class string or `None`\n \"\"\"\n class_name = self.field_label_css_class\n\n if bound_field.errors and self.field_label_invalid_css_class:\n class_name = join_css_class(\n class_name, self.field_label_invalid_css_class)\n\n return class_name or None\n"
] |
class BootstrapTapeformMixin(TapeformMixin):
"""
Tapeform Mixin to render Bootstrap4 compatible forms.
(using the template tags provided by `tapeforms`).
"""
#: Use a special layout template for Bootstrap compatible forms.
layout_template = 'tapeforms/layouts/bootstrap.html'
#: Use a special field template for Bootstrap compatible forms.
field_template = 'tapeforms/fields/bootstrap.html'
#: Bootstrap requires that the field has a css class "form-group" applied.
field_container_css_class = 'form-group'
#: All widgets need a css class "form-control" (expect checkboxes).
widget_css_class = 'form-control'
#: Use a special class to invalid field's widget.
widget_invalid_css_class = 'is-invalid'
#: Widgets with multiple inputs require some extra care (don't use ul, etc.)
widget_template_overrides = {
forms.SelectDateWidget: 'tapeforms/widgets/bootstrap_multiwidget.html',
forms.SplitDateTimeWidget: 'tapeforms/widgets/bootstrap_multiwidget.html',
forms.RadioSelect: 'tapeforms/widgets/bootstrap_multipleinput.html',
forms.CheckboxSelectMultiple: 'tapeforms/widgets/bootstrap_multipleinput.html'
}
def get_field_container_css_class(self, bound_field):
"""
Returns 'form-check' if widget is CheckboxInput. For all other fields,
return the default value from the form property ("form-group").
"""
# If we render CheckboxInputs, Bootstrap requires a different
# container class for checkboxes.
if isinstance(bound_field.field.widget, forms.CheckboxInput):
return 'form-check'
return super().get_field_container_css_class(bound_field)
def get_widget_css_class(self, field_name, field):
"""
Returns 'form-check-input' if widget is CheckboxInput or 'form-control-file'
if widget is FileInput. For all other fields return the default value
from the form property ("form-control").
"""
# If we render CheckboxInputs, Bootstrap requires a different
# widget css class for checkboxes.
if isinstance(field.widget, forms.CheckboxInput):
return 'form-check-input'
# Idem for fileinput.
if isinstance(field.widget, forms.FileInput):
return 'form-control-file'
return super().get_widget_css_class(field_name, field)
|
stephrdev/django-tapeforms
|
tapeforms/contrib/bootstrap.py
|
BootstrapTapeformMixin.get_widget_css_class
|
python
|
def get_widget_css_class(self, field_name, field):
# If we render CheckboxInputs, Bootstrap requires a different
# widget css class for checkboxes.
if isinstance(field.widget, forms.CheckboxInput):
return 'form-check-input'
# Idem for fileinput.
if isinstance(field.widget, forms.FileInput):
return 'form-control-file'
return super().get_widget_css_class(field_name, field)
|
Returns 'form-check-input' if widget is CheckboxInput or 'form-control-file'
if widget is FileInput. For all other fields return the default value
from the form property ("form-control").
|
train
|
https://github.com/stephrdev/django-tapeforms/blob/255602de43777141f18afaf30669d7bdd4f7c323/tapeforms/contrib/bootstrap.py#L55-L70
| null |
class BootstrapTapeformMixin(TapeformMixin):
"""
Tapeform Mixin to render Bootstrap4 compatible forms.
(using the template tags provided by `tapeforms`).
"""
#: Use a special layout template for Bootstrap compatible forms.
layout_template = 'tapeforms/layouts/bootstrap.html'
#: Use a special field template for Bootstrap compatible forms.
field_template = 'tapeforms/fields/bootstrap.html'
#: Bootstrap requires that the field has a css class "form-group" applied.
field_container_css_class = 'form-group'
#: All widgets need a css class "form-control" (expect checkboxes).
widget_css_class = 'form-control'
#: Use a special class to invalid field's widget.
widget_invalid_css_class = 'is-invalid'
#: Widgets with multiple inputs require some extra care (don't use ul, etc.)
widget_template_overrides = {
forms.SelectDateWidget: 'tapeforms/widgets/bootstrap_multiwidget.html',
forms.SplitDateTimeWidget: 'tapeforms/widgets/bootstrap_multiwidget.html',
forms.RadioSelect: 'tapeforms/widgets/bootstrap_multipleinput.html',
forms.CheckboxSelectMultiple: 'tapeforms/widgets/bootstrap_multipleinput.html'
}
def get_field_container_css_class(self, bound_field):
"""
Returns 'form-check' if widget is CheckboxInput. For all other fields,
return the default value from the form property ("form-group").
"""
# If we render CheckboxInputs, Bootstrap requires a different
# container class for checkboxes.
if isinstance(bound_field.field.widget, forms.CheckboxInput):
return 'form-check'
return super().get_field_container_css_class(bound_field)
def get_field_label_css_class(self, bound_field):
"""
Returns 'form-check-label' if widget is CheckboxInput. For all other fields,
no css class is added.
"""
# If we render CheckboxInputs, Bootstrap requires a different
# field label css class for checkboxes.
if isinstance(bound_field.field.widget, forms.CheckboxInput):
return 'form-check-label'
return super().get_field_label_css_class(bound_field)
|
ten10solutions/Geist
|
geist/backends/replay.py
|
geist_replay
|
python
|
def geist_replay(wrapped, instance, args, kwargs):
path_parts = []
file_parts = []
if hasattr(wrapped, '__module__'):
module = wrapped.__module__
module_file = sys.modules[module].__file__
root, _file = os.path.split(module_file)
path_parts.append(root)
_file, _ = os.path.splitext(_file)
file_parts.append(_file)
if hasattr(wrapped, '__objclass__'):
file_parts.append(wrapped.__objclass__.__name__)
elif hasattr(wrapped, '__self__'):
file_parts.append(wrapped.__self__.__class__.__name__)
file_parts.append(wrapped.__name__ + '.log')
path_parts.append('_'.join(file_parts))
filename = os.path.join(*path_parts)
if is_in_record_mode():
platform_backend = get_platform_backend()
backend = RecordingBackend(
source_backend=platform_backend,
recording_filename=filename
)
else:
backend = PlaybackBackend(
recording_filename=filename
)
gui = GUI(backend)
return wrapped(gui, *args, **kwargs)
|
Wraps a test of other function and injects a Geist GUI which will
enable replay (set environment variable GEIST_REPLAY_MODE to 'record' to
active record mode.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/backends/replay.py#L51-L84
|
[
"def get_platform_backend(**kwargs):\n if sys.platform.startswith('win'):\n import geist.backends.windows\n return geist.backends.windows.GeistWindowsBackend(**kwargs)\n else:\n import geist.backends.xvfb\n return geist.backends.xvfb.GeistXvfbBackend(**kwargs)\n",
"def is_in_record_mode():\n return os.environ.get(_MODE_ENV_VAR_NAME, '') == _RECORD_MODE_ENV_VAR_VALUE\n"
] |
from __future__ import division, absolute_import, print_function
import os
import sys
import json
import base64
import StringIO
import wrapt
from PIL import Image
from . import get_platform_backend
from ..core import GUI
from ..finders import Location, LocationList
from ._common import BackendActionBuilder
import numpy
import logging
logger = logging.getLogger(__name__)
"""These backends provide support for writing tests of code using Geist.
The intention is for you to write unit tests which can be run in record mode
on the system under test which will produce an output file which can then run
the test in playback mode.
Example:
User the @geist_replay decorator on your test.
To record set the GEIST_REPLAY_MODE=record environment variable
Windows:
set GEIST_REPLAY_MODE=record
Linux:
export GEIST_REPLAY_MODE=record
To replay don't declare or set the GEIST_REPLAY_MODE to any other value.
"""
_MODE_ENV_VAR_NAME = 'GEIST_REPLAY_MODE'
_RECORD_MODE_ENV_VAR_VALUE = 'record'
def is_in_record_mode():
return os.environ.get(_MODE_ENV_VAR_NAME, '') == _RECORD_MODE_ENV_VAR_VALUE
@wrapt.decorator
class _ActionsTransaction(object):
def __init__(self, backend):
self._actions_builder = BackendActionBuilder(backend)
def __enter__(self):
return self._actions_builder
def __exit__(self, *args):
self._actions_builder.execute()
return False
_FORMAT_VERSION = [0, 1]
class PlaybackBackend(object):
def __init__(self, recording_filename='backend_recording.log'):
self._record_file = open(recording_filename, 'rb')
version = json.loads(next(self._record_file))
assert version['version'] == _FORMAT_VERSION
def capture_locations(self):
logger.debug('replay func: "capture_locations" called')
try:
data = json.loads(next(self._record_file))
except StopIteration:
raise AssertionError('capture_locations end of replay')
if data['func'] != 'capture_locations':
raise AssertionError(
"function mismatch recorded %r != 'capture_locations'" %
(data['func'],)
)
b64_locations = data['locations']
location_list = LocationList()
for b64_location in b64_locations:
base64_png = b64_location['base64_png']
string_file = StringIO.StringIO(
base64.b64decode(base64_png)
)
x, y, w, h = (
b64_location['x'],
b64_location['y'],
b64_location['w'],
b64_location['h']
)
image = numpy.array(Image.open(string_file))
location_list.append(Location(x, y, w, h, image=image))
return location_list
def actions_transaction(self):
return _ActionsTransaction(self)
def _json_type_coercian(self, data):
return json.loads(json.dumps(data))
def __getattr__(self, name):
def func(*args, **kwargs):
logger.debug('replay func: %r called with args: %r, kwargs %r', name, args, kwargs)
try:
data = json.loads(next(self._record_file))
except StopIteration:
raise AssertionError('%s end of replay' % (name,))
func_name = name
recd_name = data['func']
if func_name != recd_name:
raise AssertionError(
'function mismatch recorded %r != %r' %
(recd_name, func_name)
)
recd_args = data['args']
recd_kwargs = data['kwargs']
if self._json_type_coercian(args) != recd_args:
raise AssertionError(
'args mismatch recorded %r != %r' % (recd_args, args)
)
if self._json_type_coercian(kwargs) != recd_kwargs:
raise AssertionError(
'kwargs mismatch recorded %r != %r' % (recd_kwargs, kwargs)
)
return func
class RecordingBackend(object):
def __init__(
self,
source_backend=None,
recording_filename='backend_recording.log',
**kwargs
):
if source_backend is None:
raise ValueError('source_backend is required for %r' % (self))
self._source_backend = source_backend
self._record_file = open(recording_filename, 'wb')
json.dump({'version': _FORMAT_VERSION}, self._record_file)
self._record_file.write('\n')
self._record_file.flush()
def _write_action(self, funcname, *args, **kwargs):
json.dump(
{'func': funcname, 'args': args, 'kwargs': kwargs},
self._record_file
)
self._record_file.write('\n')
self._record_file.flush()
def _write_capture_locations(self, locations):
b64_locations = []
for location in locations:
string_file = StringIO.StringIO()
Image.fromarray(location.image).save(string_file, 'png')
b64_png = base64.b64encode(string_file.getvalue())
b64_locations.append({
'base64_png': b64_png,
'x': location.x,
'y': location.y,
'w': location.w,
'h': location.h
})
json.dump(
{'func': 'capture_locations', 'locations': b64_locations},
self._record_file
)
self._record_file.write('\n')
self._record_file.flush()
def capture_locations(self):
locations = list(self._source_backend.capture_locations())
self._write_capture_locations(locations)
return locations
def actions_transaction(self):
return _ActionsTransaction(self)
def __getattr__(self, name):
if not name.startswith('_'):
def callable_action(*args, **kwargs):
getattr(self._source_backend, name)(*args, **kwargs)
self._write_action(name, *args, **kwargs)
return callable_action
|
ten10solutions/Geist
|
geist/backends/xvfb.py
|
GeistXvfbBackend._find_display
|
python
|
def _find_display(self):
self.display_num = 2
while os.path.isdir(XVFB_PATH % (self.display_num,)):
self.display_num += 1
|
Find a usable display, which doesn't have an existing Xvfb file
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/backends/xvfb.py#L85-L91
| null |
class GeistXvfbBackend(GeistXBase):
_FB_OFFSET = 3232
def __init__(self, **kwargs):
self.display_num = kwargs.get('display_num', None)
width = kwargs.get('width', 1280)
height = kwargs.get('height', 1024)
if self.display_num is None:
self._find_display()
display = ":%d" % (self.display_num, )
self._display_dir = XVFB_PATH % (self.display_num,)
os.makedirs(self._display_dir, 0700)
dev_null = open('/dev/null', 'w')
self._xvfb_proc = subprocess.Popen(
[
"Xvfb",
display,
"-screen",
"0",
"%dx%dx24" % (width, height),
"-fbdir",
self._display_dir,
],
stdout=dev_null,
stderr=subprocess.STDOUT
)
fb_filepath = '%s/Xvfb_screen0' % (self._display_dir,)
start_t = time.time()
while not os.path.exists(fb_filepath):
if time.time() - start_t > 10:
raise Exception('Xvfb mmap file did not appear')
time.sleep(1)
GeistXBase.__init__(self, display=display)
self._xwd_reader = XwdToNumpyReader(fb_filepath)
logger.info("Started Xvfb with file in %s", self._display_dir)
def capture_locations(self):
image = self._xwd_reader.get_image()
h, w = image.shape[:2]
return LocationList([Location(0, 0, w, h, image=image)])
def close(self):
GeistXBase.close(self)
if hasattr(self, '_xvfb_proc'):
self._xvfb_proc.kill()
self._xvfb_proc.wait()
shutil.rmtree(self._display_dir, ignore_errors=False)
del self._xvfb_proc
|
ten10solutions/Geist
|
geist/vision.py
|
pad_bin_image_to_shape
|
python
|
def pad_bin_image_to_shape(image, shape):
"""
Padd image to size :shape: with zeros
"""
h, w = shape
ih, iw = image.shape
assert ih <= h
assert iw <= w
if iw < w:
result = numpy.hstack((image, numpy.zeros((ih, w - iw), bool)))
else:
result = image
if ih < h:
result = numpy.vstack((result, numpy.zeros((h - ih, w), bool)))
return result
|
Padd image to size :shape: with zeros
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L13-L27
| null |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
x, y, w, h = rect
return image[y:y + h, x:x + w]
def pad_bin_image_to_shape(image, shape):
"""
Padd image to size :shape: with zeros
"""
h, w = shape
ih, iw = image.shape
assert ih <= h
assert iw <= w
if iw < w:
result = numpy.hstack((image, numpy.zeros((ih, w - iw), bool)))
else:
result = image
if ih < h:
result = numpy.vstack((result, numpy.zeros((h - ih, w), bool)))
return result
def sum_2d_images(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result += image
return result
OVERLAP_TABLE = {
16: (16, 8, 4, 2, 1),
15: (15, 5, 3, 1),
12: (12, 6, 4, 3, 2, 1),
10: (10, 5, 2, 1),
9: (9, 3, 1),
8: (8, 4, 2, 1),
6: (6, 3, 2, 1),
5: (5, 1),
4: (4, 2, 1),
3: (3, 1),
2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
tollerance=0.5, overlap_table=OVERLAP_TABLE):
"""
Selects and applies the best convolution method to find template in image.
Returns a list of matches in (width, height, x offset, y offset)
format (where the x and y offsets are from the top left corner).
As the images are binary images, we can utilise the extra bit space in the
float64's by cutting the image into tiles and stacking them into variable
grayscale values.
This allows converting a sparse binary image into a dense(r) grayscale one.
"""
template_sum = numpy.count_nonzero(bin_template)
th, tw = bin_template.shape
ih, iw = bin_image.shape
if template_sum == 0 or th == 0 or tw == 0:
# If we don't have a template
return []
if th > ih or tw > iw:
# If the template is bigger than the image
return []
# How many cells can we split the image into?
max_vert_cells = ih // th
max_hor_cells = iw // th
# Try to work out how many times we can stack the image
usable_factors = {n: factors for n, factors in overlap_table.iteritems()
if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
overlap_options = [(factor, n // factor)
for n, factors in usable_factors.iteritems()
for factor in factors
if (factor <= max_vert_cells and
n // factor <= max_hor_cells)]
if not overlap_options:
# We can't stack the image
return convolution(bin_template, bin_image, tollerance=tollerance)
best_overlap = min(overlap_options,
key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
return overlapped_convolution(bin_template, bin_image,
tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
expected = numpy.count_nonzero(bin_template)
ih, iw = bin_image.shape
th, tw = bin_template.shape
# Padd image to even dimensions
if ih % 2 or iw % 2:
if ih % 2:
ih += 1
if iw % 2:
iw += 1
bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
if expected == 0:
return []
# Calculate the convolution of the FFT's of the image & template
convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
bin_image.shape)
# Reverse the FFT to find the result image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
# The areas in the result image within expected +- tollerance are where we
# saw matches
found_bitmap = ((convolution_image > (expected - tollerance)) &
(convolution_image < (expected + tollerance)))
match_points = numpy.transpose(numpy.nonzero(found_bitmap)) # bottom right
# Find the top left point from the template (remember match_point is
# inside the template (hence -1)
return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
def overlapped_convolution(bin_template, bin_image,
tollerance=0.5, splits=(4, 2)):
"""
As each of these images are hold only binary values, and RFFT2 works on
float64 greyscale values, we can make the convolution more efficient by
breaking the image up into :splits: sectons. Each one of these sections
then has its greyscale value adjusted and then stacked.
We then apply the convolution to this 'stack' of images, and adjust the
resultant position matches.
"""
th, tw = bin_template.shape
ih, iw = bin_image.shape
hs, ws = splits
h = ih // hs
w = iw // ws
count = numpy.count_nonzero(bin_template)
assert count > 0
assert h >= th
assert w >= tw
yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
# image_stacks is Origin (x,y), array, z (height in stack)
image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
for num, (x1, x2, y1, y2) in
enumerate((x1, x2, y1, y2) for (x1, x2)
in xoffset for (y1, y2) in yoffset)]
pad_h = max(i.shape[0] for _, i, _ in image_stacks)
pad_w = max(i.shape[1] for _, i, _ in image_stacks)
# rfft metrics must be an even size - why ... maths?
pad_w += pad_w % 2
pad_h += pad_h % 2
overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
* num for _, i, num in image_stacks)
#print "Overlap splits %r, Image Size (%d,%d),
#Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
# Calculate the convolution of the FFT's of the overlapped image & template
convolution_freqs = (rfft2(overlapped_image) *
rfft2(bin_template[::-1, ::-1],
overlapped_image.shape))
# Reverse the FFT to find the result overlapped image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
results = set()
for (x, y), _, num in image_stacks[::-1]:
test = convolution_image / num
filtered = ((test >= (count - tollerance)) &
(test <= (count + tollerance)))
match_points = numpy.transpose(numpy.nonzero(filtered)) # bottom right
for (fy, fx) in match_points:
if fx < (tw - 1) or fy < (th - 1):
continue
results.add((x + fx - (tw - 1), y + fy - (th - 1)))
convolution_image %= num
return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
tollerance=0.5, rescale=10):
result = []
h, w = bin_template.shape[:2]
rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
bin_template, bin_image, tollerance))
ih, iw = bin_image.shape[:2]
# areas of interest
aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
for x, y, w, h in rects:
aoi[y // rescale:(y + h) // rescale + 1,
x // rescale:(x + w) // rescale + 1] = True
bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
if bp:
bp = prune_unbeneficial_partitions(aoi, bp)
return result
def binary_partition_to_rects(bp, image, template_w, template_h,
xoffset=0, yoffset=0):
h, w = image.shape[2:]
if bp is None:
return [(xoffset, yoffset, w, h)]
pos, axis, (p1, p2) = bp
if axis == 0:
new_xoffset, new_yoffset = xoffset, yoffset + pos
i1, i2 = image[pos:], image[:pos]
else:
new_xoffset, new_yoffset = xoffset + pos, yoffset
i1, i2 = image[:, pos:], image[:, :pos]
def prune_unbeneficial_partitions(image, bp):
pos, axis, (p1, p2) = bp
if axis == 0:
i1, i2 = image[pos:], image[:pos]
else:
i1, i2 = image[:, pos:], image[:, :pos]
if p1 is None:
p1_result = numpy.count_nonzero(i1) == 0
else:
p1_result = prune_unbeneficial_partitions(i1, p1)
if p2 is None:
p2_result = numpy.count_nonzero(i2) == 0
else:
p2_result = prune_unbeneficial_partitions(i2, p2)
if p1_result or p2_result:
return [pos,
axis,
[None if p1_result in [True, False] else p1_result,
None if p2_result in [True, False] else p2_result]]
else:
return None
def get_partition_scores(image, min_w=1, min_h=1):
"""Return list of best to worst binary splits along the x and y axis.
"""
h, w = image.shape[:2]
if w == 0 or h == 0:
return []
area = h * w
cnz = numpy.count_nonzero
total = cnz(image)
if total == 0 or area == total:
return []
if h < min_h * 2:
y_c = []
else:
y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))),
y, 0)
for count, y in ((cnz(image[y:]), y)
for y in range(min_h, image.shape[0] - min_h))]
if w < min_w * 2:
x_c = []
else:
x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))),
x, 1)
for count, x in ((cnz(image[:, x:]), x)
for x in range(min_w, image.shape[1] - min_w))]
return sorted(x_c + y_c)
def get_best_partition(image, min_w=1, min_h=1):
partitions = get_partition_scores(image, min_w=min_w, min_h=min_h)
if partitions:
return partitions[0][-2:]
else:
return None
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
"""Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf
nodes == None.
If max_depth < 0 this function will continue until all leaf nodes have
been found, if it is >= 0 leaf nodes will be created at that depth.
min_w and min_h are the minimum width or height of a partition.
"""
if max_depth >= 0 and depth >= max_depth:
return None
partition = get_best_partition(image, min_w=min_w, min_h=min_h)
if partition is None:
return None
pos, axis = partition
if axis == 0:
p1 = binary_partition_image(
image[pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:pos], min_w, min_h, depth + 1, max_depth)
elif axis == 1:
p1 = binary_partition_image(
image[:, pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:, :pos], min_w, min_h, depth + 1, max_depth)
return [pos, axis, [p1, p2]]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
if res_image is None:
res_image = numpy.zeros(image.shape)
if counter is None:
counter = itertools.count(15)
pos, axis, (p1, p2) = subdiv
if axis == 0:
s1, s2 = res_image[pos:], res_image[:pos]
i1, i2 = image[pos:], image[:pos]
else:
s1, s2 = res_image[:, pos:], res_image[:, :pos]
i1, i2 = image[:, pos:], image[:, :pos]
if p1 is None:
if numpy.count_nonzero(i1):
s1[:] = next(counter)
else:
draw_binary_partition(i1, p1, s1, counter)
if p2 is None:
if numpy.count_nonzero(i2):
s2[:] = next(counter)
else:
draw_binary_partition(i2, p2, s2, counter)
return res_image
def rescale2avg(image):
sub1 = image[:-1:2, :-1:2]
sub2 = image[1::2, 1::2]
res = numpy.zeros(sub1.shape, numpy.uint32)
res += sub1
res += sub2
res /= 2
return res.astype(numpy.uint8)
def rescale2max(image):
sub1 = image[:-1:2, :-1:2]
sub2 = image[1::2, 1::2]
if len(image.shape) == 3:
max_map = grey_scale(sub1) > grey_scale(sub2)
else:
max_map = sub1 > sub2
inv_max_map = max_map == False
res = numpy.zeros(sub1.shape, numpy.uint8)
res[max_map] = sub1[max_map]
res[inv_max_map] = sub2[inv_max_map]
return res
def rescale3avg(image):
sub1 = image[:-2:3, :-2:3]
sub2 = image[1:-1:3, 1:-1:3]
sub3 = image[2::3, 2::3]
res = numpy.zeros(sub1.shape, numpy.uint32)
res += sub1
res += sub2
res += sub3
res /= 3
return res.astype(numpy.uint8)
def rescale3max(image):
sub1 = image[:-2:3, :-2:3]
sub2 = image[1:-1:3, 1:-1:3]
sub3 = image[2::3, 2::3]
if len(image.shape) == 3:
grey1 = grey_scale(sub1)
grey2 = grey_scale(sub2)
grey3 = grey_scale(sub3)
else:
grey1, grey2, grey3 = sub1, sub2, sub3
max_map_1 = (grey1 > grey2) & (grey1 > grey3)
max_map_2 = (grey2 > grey1) & (grey2 > grey3)
max_map_3 = (max_map_1 | max_map_2) == False
res = numpy.zeros(sub1.shape, numpy.uint8)
res[max_map_1] = sub1[max_map_1]
res[max_map_2] = sub2[max_map_2]
res[max_map_3] = sub3[max_map_3]
return res
def or_reduce_rescale3max_offset(image):
return reduce(operator.or_,
(rescale3max(image[y1:y2, x1:x2]) for (y1, y2), (x1, x2)
in itertools.product(
*[[(i, -(3 - i)) for i in range(3)]] * 2)
)
)
def numpy_or_all(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result |= image
return result
def grey_scale(image):
"""Converts RGB image to Greyscale
:param image: input image
:type image: 3 channel 3d :class:`numpy.ndarray`
:rtype: :class:`numpy.ndarray`
"""
return image.astype(numpy.int32).sum(axis=2) // 3
def find_edges(image):
base = image[:, :, numpy.newaxis].astype(numpy.int16)
c = base[1:-1, 1:-1]
e = base[1:-1, :-2]
w = base[1:-1, 2:]
s = base[:-2, 1:-1]
n = base[2:, 1:-1]
diffs = numpy.concatenate([c-e, c-w, c-s, c-n], axis=2).max(axis=2)
diffs[diffs < 0] = 0
dh, dw = diffs.shape
col = numpy.zeros((dh, 1), numpy.uint8)
row = numpy.zeros((1, dw+2), numpy.uint8)
return numpy.vstack(
(row, numpy.hstack((col, diffs.astype(numpy.uint8), col)), row)
)
def find_threshold_near_density(img, density, low=0, high=255):
"""Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param migh: max threshold to test
:type low: ubyte
:rtype: ubyte
"""
size = numpy.size(img)
densities = []
last_t = None
while True:
t = ((high - low) // 2) + low
if t == last_t:
densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
return densities[0][1]
else:
last_t = t
d = numpy.count_nonzero(img > t) / size
densities.append((d, t))
if d < density:
high = t
elif d >= density: # search away from low
low = t
def filter_greys_using_image(image, target):
"""Filter out any values in target not in image
:param image: image containing values to appear in filtered image
:param target: the image to filter
:rtype: 2d :class:`numpy.ndarray` containing only value in image
and with the same dimensions as target
"""
maskbase = numpy.array(range(256), dtype=numpy.uint8)
mask = numpy.where(numpy.in1d(maskbase, numpy.unique(image)), maskbase, 0)
return mask[target]
def correlation_coefficient_normed(template, image):
h, w = template.shape[:2]
H, W = image.shape[:2]
template_size = template.size
template_distance = template - (template.sum() / template_size)
corr_num = numpy.zeros((H, W), numpy.float64)
corr_denum = numpy.zeros((H, W), numpy.float64)
for y in xrange(H):
for x in xrange(W):
image_in_template_area = image[y:y + h, x:x + w]
image_distance_of_template_area = (
image_in_template_area - (
image_in_template_area.sum() /
template_size
)
)
I_H, I_W = image_distance_of_template_area.shape[:2]
sum_of_template_by_image_distance_at_xy = (
image_distance_of_template_area * template_distance[:I_H, :I_W]
).sum()
corr_num[y, x] = sum_of_template_by_image_distance_at_xy
corr_denum[y, x] = numpy.sqrt(
(template_distance ** 2).sum() *
(image_distance_of_template_area ** 2).sum()
)
print y
return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/vision.py
|
best_convolution
|
python
|
def best_convolution(bin_template, bin_image,
tollerance=0.5, overlap_table=OVERLAP_TABLE):
"""
Selects and applies the best convolution method to find template in image.
Returns a list of matches in (width, height, x offset, y offset)
format (where the x and y offsets are from the top left corner).
As the images are binary images, we can utilise the extra bit space in the
float64's by cutting the image into tiles and stacking them into variable
grayscale values.
This allows converting a sparse binary image into a dense(r) grayscale one.
"""
template_sum = numpy.count_nonzero(bin_template)
th, tw = bin_template.shape
ih, iw = bin_image.shape
if template_sum == 0 or th == 0 or tw == 0:
# If we don't have a template
return []
if th > ih or tw > iw:
# If the template is bigger than the image
return []
# How many cells can we split the image into?
max_vert_cells = ih // th
max_hor_cells = iw // th
# Try to work out how many times we can stack the image
usable_factors = {n: factors for n, factors in overlap_table.iteritems()
if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
overlap_options = [(factor, n // factor)
for n, factors in usable_factors.iteritems()
for factor in factors
if (factor <= max_vert_cells and
n // factor <= max_hor_cells)]
if not overlap_options:
# We can't stack the image
return convolution(bin_template, bin_image, tollerance=tollerance)
best_overlap = min(overlap_options,
key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
return overlapped_convolution(bin_template, bin_image,
tollerance=tollerance, splits=best_overlap)
|
Selects and applies the best convolution method to find template in image.
Returns a list of matches in (width, height, x offset, y offset)
format (where the x and y offsets are from the top left corner).
As the images are binary images, we can utilise the extra bit space in the
float64's by cutting the image into tiles and stacking them into variable
grayscale values.
This allows converting a sparse binary image into a dense(r) grayscale one.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L57-L100
| null |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
    """Return the sub-array of *image* covered by rect = (x, y, w, h)."""
    left, top, width, height = rect
    return image[top:top + height, left:left + width]
def pad_bin_image_to_shape(image, shape):
    """Pad *image* with zero columns/rows on the right/bottom up to *shape*.

    Returns the original array unchanged when it already has *shape*.
    """
    target_h, target_w = shape
    img_h, img_w = image.shape
    assert img_h <= target_h
    assert img_w <= target_w
    if img_w < target_w:
        filler = numpy.zeros((img_h, target_w - img_w), bool)
        image = numpy.hstack((image, filler))
    if img_h < target_h:
        filler = numpy.zeros((target_h - img_h, target_w), bool)
        image = numpy.vstack((image, filler))
    return image
def sum_2d_images(images):
    """Accumulate an iterable of same-shaped arrays into their element sum.

    The first array is copied, so the inputs are never mutated.
    """
    stream = iter(images)
    total = numpy.copy(next(stream))
    for frame in stream:
        total += frame
    return total
# Map from a total tile count n to the factors of n usable as the number of
# vertical splits when tiling an image for best_convolution (the matching
# horizontal split count is then n // factor).
OVERLAP_TABLE = {
    16: (16, 8, 4, 2, 1),
    15: (15, 5, 3, 1),
    12: (12, 6, 4, 3, 2, 1),
    10: (10, 5, 2, 1),
    9: (9, 3, 1),
    8: (8, 4, 2, 1),
    6: (6, 3, 2, 1),
    5: (5, 1),
    4: (4, 2, 1),
    3: (3, 1),
    2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
# (2 ** 41; presumably chosen to keep the stacked grey values within
# float64's exactly-representable integer range -- confirm before changing.)
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
                     tollerance=0.5, overlap_table=OVERLAP_TABLE):
    """
    Selects and applies the best convolution method to find template in image.

    Returns a list of (x, y) top-left match positions.

    As the images are binary, the extra bit space in float64 is used by
    cutting the image into tiles and stacking them at graduated grayscale
    values, converting a sparse binary image into a dense(r) grayscale one
    that a single FFT convolution can search.

    :param bin_template: 2d boolean template to search for
    :param bin_image: 2d boolean image to search in
    :param tollerance: allowed deviation from the template's pixel count
    :param overlap_table: map of tile count -> usable vertical-split factors
    """
    template_sum = numpy.count_nonzero(bin_template)
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    if template_sum == 0 or th == 0 or tw == 0:
        # Nothing to search for.
        return []
    if th > ih or tw > iw:
        # Template larger than the image: no match possible.
        return []
    # Maximum number of template-sized tiles along each axis.
    max_vert_cells = ih // th
    # Fixed: was `iw // th` (template *height*), which could permit splits
    # where a tile ends up narrower than the template and trip the
    # `assert w >= tw` inside overlapped_convolution when tw > th.
    max_hor_cells = iw // tw
    # Only consider stack sizes whose graduated grey values stay within
    # float64 precision (each layer multiplies by template_sum + 1).
    # .items() (not Py2-only .iteritems()) keeps this Py2/Py3 compatible.
    usable_factors = {n: factors for n, factors in overlap_table.items()
                      if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
    overlap_options = [(factor, n // factor)
                       for n, factors in usable_factors.items()
                       for factor in factors
                       if (factor <= max_vert_cells and
                           n // factor <= max_hor_cells)]
    if not overlap_options:
        # We can't stack the image; fall back to a plain convolution.
        return convolution(bin_template, bin_image, tollerance=tollerance)
    # Choose the split minimising the padded stacked-image area.
    best_overlap = min(overlap_options,
                       key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
    return overlapped_convolution(bin_template, bin_image,
                                  tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
    """Locate *bin_template* inside *bin_image* via FFT convolution.

    Returns a list of (x, y) top-left match positions: cells where the
    convolution score is within *tollerance* of the template's set-pixel
    count are treated as matches.
    """
    expected = numpy.count_nonzero(bin_template)
    ih, iw = bin_image.shape
    th, tw = bin_template.shape
    # rfft2 needs even dimensions, so zero-pad odd-sized images.
    if ih % 2 or iw % 2:
        ih += ih % 2
        iw += iw % 2
        bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
    if expected == 0:
        return []
    # Multiply the FFTs of image and (flipped) template, then invert.
    freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
                                     bin_image.shape)
    scores = irfft2(freqs)
    # Each hit marks the bottom-right corner of the matched area.
    hits = ((scores > (expected - tollerance)) &
            (scores < (expected + tollerance)))
    bottom_rights = numpy.transpose(numpy.nonzero(hits))
    # Translate back to the template's top-left corner (hence the -1).
    return [((bx - (tw - 1)), (by - (th - 1))) for (by, bx) in bottom_rights]
def overlapped_convolution(bin_template, bin_image,
                           tollerance=0.5, splits=(4, 2)):
    """Template match on a tiled, grey-stacked copy of a binary image.

    The binary image is cut into ``splits`` (vertical, horizontal) sections.
    Each section is scaled to its own grey level ((count + 1) ** index) and
    all sections are summed into one dense array, so a single FFT
    convolution searches every tile at once.  Matches are then recovered
    per tile and translated back to full-image coordinates.
    Returns a list of (x, y) top-left match positions.
    """
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    hs, ws = splits
    h = ih // hs
    w = iw // ws
    count = numpy.count_nonzero(bin_template)
    assert count > 0
    assert h >= th
    assert w >= tw
    # Each tile is extended by template-size - 1 so matches straddling a
    # tile boundary are still found.
    yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
    xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
    # image_stacks is Origin (x,y), array, z (height in stack)
    image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
                    for num, (x1, x2, y1, y2) in
                    enumerate((x1, x2, y1, y2) for (x1, x2)
                              in xoffset for (y1, y2) in yoffset)]
    # All tiles must share one shape before they can be summed together.
    pad_h = max(i.shape[0] for _, i, _ in image_stacks)
    pad_w = max(i.shape[1] for _, i, _ in image_stacks)
    # rfft metrics must be an even size - why ... maths?
    pad_w += pad_w % 2
    pad_h += pad_h % 2
    overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
                                     * num for _, i, num in image_stacks)
    #print "Overlap splits %r, Image Size (%d,%d),
    #Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
    # Calculate the convolution of the FFT's of the overlapped image & template
    convolution_freqs = (rfft2(overlapped_image) *
                         rfft2(bin_template[::-1, ::-1],
                               overlapped_image.shape))
    # Reverse the FFT to find the result overlapped image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    results = set()
    # Unstack: walk tiles from the highest grey level down, dividing out
    # each tile's scale factor and keeping the residue (%=) for the lower
    # layers -- order matters, hence the reversed iteration.
    for (x, y), _, num in image_stacks[::-1]:
        test = convolution_image / num
        filtered = ((test >= (count - tollerance)) &
                    (test <= (count + tollerance)))
        match_points = numpy.transpose(numpy.nonzero(filtered))  # bottom right
        for (fy, fx) in match_points:
            # Points inside the template's own extent can't be a full match.
            if fx < (tw - 1) or fy < (th - 1):
                continue
            results.add((x + fx - (tw - 1), y + fy - (th - 1)))
        convolution_image %= num
    return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
                                     tollerance=0.5, rescale=10):
    # NOTE(review): `convolution_r3m_targets` is not defined anywhere in this
    # module, so calling this function raises NameError -- confirm where that
    # helper lives (or whether this function is dead code).
    # NOTE(review): `result` is never populated; as written the function
    # always returns an empty list and the pruned partition `bp` is computed
    # then discarded. Looks unfinished.
    result = []
    h, w = bin_template.shape[:2]
    rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
        bin_template, bin_image, tollerance))
    ih, iw = bin_image.shape[:2]
    # areas of interest
    # Mark every candidate rectangle on a grid downscaled by `rescale`.
    aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
    for x, y, w, h in rects:
        aoi[y // rescale:(y + h) // rescale + 1,
            x // rescale:(x + w) // rescale + 1] = True
    bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
    if bp:
        bp = prune_unbeneficial_partitions(aoi, bp)
    return result
def binary_partition_to_rects(bp, image, template_w, template_h,
                              xoffset=0, yoffset=0):
    # NOTE(review): `image.shape[2:]` yields an empty tuple for a 2-d image,
    # so this unpack raises ValueError -- presumably `image.shape[:2]` was
    # intended; confirm before relying on this function.
    h, w = image.shape[2:]
    if bp is None:
        # Leaf node: the whole region is a single rectangle.
        return [(xoffset, yoffset, w, h)]
    pos, axis, (p1, p2) = bp
    if axis == 0:
        new_xoffset, new_yoffset = xoffset, yoffset + pos
        i1, i2 = image[pos:], image[:pos]
    else:
        new_xoffset, new_yoffset = xoffset + pos, yoffset
        i1, i2 = image[:, pos:], image[:, :pos]
    # NOTE(review): function appears unfinished -- the computed offsets and
    # halves are never used and the non-leaf path falls through returning
    # None; recursive calls on (p1, i1) and (p2, i2) were likely intended.
def prune_unbeneficial_partitions(image, bp):
    """Trim a binary-partition tree, dropping subtrees over empty regions.

    A leaf (None) over a region with no set pixels resolves to "prunable";
    the node is kept only while at least one side is prunable, otherwise
    None is returned for the whole node.
    """
    pos, axis, (before, after) = bp
    if axis == 0:
        first, second = image[pos:], image[:pos]
    else:
        first, second = image[:, pos:], image[:, :pos]

    def _resolve(region, node):
        # Booleans mean "this side is empty/pruned"; a list is a kept subtree.
        if node is None:
            return numpy.count_nonzero(region) == 0
        return prune_unbeneficial_partitions(region, node)

    first_res = _resolve(first, before)
    second_res = _resolve(second, after)
    if first_res or second_res:
        return [pos,
                axis,
                [None if first_res in (True, False) else first_res,
                 None if second_res in (True, False) else second_res]]
    return None
def get_partition_scores(image, min_w=1, min_h=1):
    """Return list of best to worst binary splits along the x and y axis.

    Each entry is (-balance, position, axis) where a score closer to zero
    means the set-pixel densities of the two halves are more equal.
    Returns [] when the image is degenerate, all-empty or all-full.
    """
    h, w = image.shape[:2]
    if w == 0 or h == 0:
        return []
    area = h * w
    total = numpy.count_nonzero(image)
    if total == 0 or total == area:
        return []
    candidates = []
    # Horizontal cuts (axis 0): compare density above vs. below the cut.
    if h >= min_h * 2:
        for y in range(min_h, h - min_h):
            below = numpy.count_nonzero(image[y:])
            balance = abs((below / ((h - y) * w)) -
                          ((total - below) / (y * w)))
            candidates.append((-balance, y, 0))
    # Vertical cuts (axis 1): compare density right vs. left of the cut.
    if w >= min_w * 2:
        for x in range(min_w, w - min_w):
            right = numpy.count_nonzero(image[:, x:])
            balance = abs((right / (h * (w - x))) -
                          ((total - right) / (h * x)))
            candidates.append((-balance, x, 1))
    return sorted(candidates)
def get_best_partition(image, min_w=1, min_h=1):
    """Return (position, axis) of the highest-scoring split, or None."""
    ranked = get_partition_scores(image, min_w=min_w, min_h=min_h)
    if not ranked:
        return None
    return ranked[0][-2:]
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
    """Build a binary-space-partition tree of *image*.

    Nodes are [pos, axis, [child_a, child_b]]; leaf nodes are None.
    If max_depth < 0 the recursion continues until no further split is
    possible; otherwise leaves are forced at that depth.  min_w and min_h
    are the minimum width / height of a partition.
    """
    if 0 <= max_depth <= depth:
        return None
    split = get_best_partition(image, min_w=min_w, min_h=min_h)
    if split is None:
        return None
    pos, axis = split
    if axis == 0:
        halves = (image[pos:], image[:pos])
    else:
        halves = (image[:, pos:], image[:, :pos])
    children = [binary_partition_image(half, min_w, min_h,
                                       depth + 1, max_depth)
                for half in halves]
    return [pos, axis, children]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
    """Paint each non-empty leaf region of a partition tree with a fresh
    grey value (counting up from 15) and return the painted image.
    """
    if res_image is None:
        res_image = numpy.zeros(image.shape)
    if counter is None:
        counter = itertools.count(15)
    pos, axis, (before, after) = subdiv
    if axis == 0:
        halves = ((image[pos:], res_image[pos:], before),
                  (image[:pos], res_image[:pos], after))
    else:
        halves = ((image[:, pos:], res_image[:, pos:], before),
                  (image[:, :pos], res_image[:, :pos], after))
    for src, dst, node in halves:
        if node is None:
            # Leaf: fill only when the region actually holds set pixels.
            if numpy.count_nonzero(src):
                dst[:] = next(counter)
        else:
            draw_binary_partition(src, node, dst, counter)
    return res_image
def rescale2avg(image):
    """Downscale *image* by 2, averaging one pixel from each of the two
    interleaved sub-grids (floor average, returned as uint8).
    """
    sub1 = image[:-1:2, :-1:2]
    sub2 = image[1::2, 1::2]
    # Accumulate in uint32 so the uint8 sums cannot overflow.
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    # Fixed: was `res /= 2`. In-place true division is invalid for integer
    # arrays on Python 3 / modern numpy (raises TypeError); floor division
    # matches the legacy integer-division behaviour this code relied on.
    res //= 2
    return res.astype(numpy.uint8)
def rescale2max(image):
    """Downscale *image* by 2, keeping the brighter pixel of the two
    interleaved sub-grids (brightness via grey_scale for RGB input).
    """
    grid_a = image[:-1:2, :-1:2]
    grid_b = image[1::2, 1::2]
    if image.ndim == 3:
        pick_a = grey_scale(grid_a) > grey_scale(grid_b)
    else:
        pick_a = grid_a > grid_b
    pick_b = ~pick_a
    out = numpy.zeros(grid_a.shape, numpy.uint8)
    out[pick_a] = grid_a[pick_a]
    out[pick_b] = grid_b[pick_b]
    return out
def rescale3avg(image):
    """Downscale *image* by 3, averaging one pixel from each of the three
    interleaved sub-grids (floor average, returned as uint8).
    """
    sub1 = image[:-2:3, :-2:3]
    sub2 = image[1:-1:3, 1:-1:3]
    sub3 = image[2::3, 2::3]
    # Accumulate in uint32 so the uint8 sums cannot overflow.
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    res += sub3
    # Fixed: was `res /= 3`. In-place true division is invalid for integer
    # arrays on Python 3 / modern numpy (raises TypeError); floor division
    # matches the legacy integer-division behaviour this code relied on.
    res //= 3
    return res.astype(numpy.uint8)
def rescale3max(image):
    """Downscale *image* by 3, keeping the brightest pixel of the three
    interleaved sub-grids (brightness via grey_scale for RGB input).
    """
    grid_a = image[:-2:3, :-2:3]
    grid_b = image[1:-1:3, 1:-1:3]
    grid_c = image[2::3, 2::3]
    if image.ndim == 3:
        lum_a = grey_scale(grid_a)
        lum_b = grey_scale(grid_b)
        lum_c = grey_scale(grid_c)
    else:
        lum_a, lum_b, lum_c = grid_a, grid_b, grid_c
    pick_a = (lum_a > lum_b) & (lum_a > lum_c)
    pick_b = (lum_b > lum_a) & (lum_b > lum_c)
    pick_c = ~(pick_a | pick_b)
    out = numpy.zeros(grid_a.shape, numpy.uint8)
    out[pick_a] = grid_a[pick_a]
    out[pick_b] = grid_b[pick_b]
    out[pick_c] = grid_c[pick_c]
    return out
def or_reduce_rescale3max_offset(image):
    """OR together rescale3max results for all nine (y, x) crop offsets in
    0..2 -- presumably to make the 3x downscale tolerant of sub-3-pixel
    shifts (TODO confirm intent with callers).
    """
    # Fixed: previously used the bare builtin `reduce`, which only exists
    # on Python 2 (NameError on Python 3). An explicit fold avoids needing
    # any new import and is behaviourally identical.
    tiles = (rescale3max(image[y1:y2, x1:x2])
             for (y1, y2), (x1, x2) in itertools.product(
                 *[[(i, -(3 - i)) for i in range(3)]] * 2))
    stream = iter(tiles)
    combined = next(stream)
    for tile in stream:
        combined = combined | tile
    return combined
def numpy_or_all(images):
    """Bitwise-OR every array in *images* together.

    The first array is copied, so the inputs are never mutated.
    """
    stream = iter(images)
    combined = numpy.copy(next(stream))
    for frame in stream:
        combined |= frame
    return combined
def grey_scale(image):
    """Converts RGB image to Greyscale

    :param image: input image
    :type image: 3 channel 3d :class:`numpy.ndarray`
    :rtype: 2d :class:`numpy.ndarray` (floor mean of the channels)
    """
    channel_total = image.astype(numpy.int32).sum(axis=2)
    return channel_total // 3
def find_edges(image):
    """Return a per-pixel edge-strength map of a 2-d grey image.

    Each interior pixel gets the maximum positive difference between it and
    its four axis-aligned neighbours; the one-pixel border (lost to the
    neighbour slicing) is re-added as zeros.  Result dtype is uint8.
    """
    # int16 so the subtractions below can go negative before clamping.
    base = image[:, :, numpy.newaxis].astype(numpy.int16)
    c = base[1:-1, 1:-1]
    e = base[1:-1, :-2]
    w = base[1:-1, 2:]
    s = base[:-2, 1:-1]
    n = base[2:, 1:-1]
    # Strongest step from each pixel to any of its four neighbours.
    diffs = numpy.concatenate([c-e, c-w, c-s, c-n], axis=2).max(axis=2)
    # Keep only positive steps (edges rising toward this pixel).
    diffs[diffs < 0] = 0
    dh, dw = diffs.shape
    # Zero border to restore the original image dimensions.
    col = numpy.zeros((dh, 1), numpy.uint8)
    row = numpy.zeros((1, dw+2), numpy.uint8)
    return numpy.vstack(
        (row, numpy.hstack((col, diffs.astype(numpy.uint8), col)), row)
    )
def find_threshold_near_density(img, density, low=0, high=255):
    """Find a threshold where the fraction of pixels above the threshold
    is closest to density where density is (count of pixels above
    threshold / count of pixels).
    The highest threshold closest to the desired density will be returned.
    Use low and high to exclude undesirable thresholds.
    :param img: target image
    :type img: 2d :class:`numpy.ndarray`
    :param density: target density
    :type density: float between 0.0 and 1.0
    :param low: min threshold to test
    :type low: ubyte
    :param high: max threshold to test
    :type high: ubyte
    :rtype: ubyte
    """
    size = numpy.size(img)
    densities = []  # (achieved density, threshold) for every probe tried
    last_t = None
    # Binary search over the threshold range; terminates when the midpoint
    # stops moving (low/high have converged).
    while True:
        t = ((high - low) // 2) + low
        if t == last_t:
            # Converged: choose the probe closest to the target density,
            # preferring the highest threshold on ties (hence 256 - x[1]).
            densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
            return densities[0][1]
        else:
            last_t = t
        d = numpy.count_nonzero(img > t) / size
        densities.append((d, t))
        if d < density:
            high = t
        elif d >= density:  # search away from low
            low = t
def filter_greys_using_image(image, target):
    """Filter out any values in target not in image

    :param image: image containing values to appear in filtered image
    :param target: the image to filter
    :rtype: 2d :class:`numpy.ndarray` with target's dimensions, where grey
        values absent from image are replaced by 0
    """
    # Identity lookup table for the grey values present in `image`,
    # zero everywhere else; indexing with `target` applies it per pixel.
    lut = numpy.zeros(256, dtype=numpy.uint8)
    present = numpy.unique(image)
    lut[present] = present
    return lut[target]
def correlation_coefficient_normed(template, image):
    """Normalised cross-correlation of *template* against *image*.

    Returns an (H, W) float64 array whose (y, x) entry is the correlation
    coefficient between the template and the image window anchored at
    (x, y); identical content scores 1.0.  Windows that extend past the
    image edge are correlated over the overlapping region only (while
    still normalising the window mean by the full template size, matching
    the original behaviour).
    """
    h, w = template.shape[:2]
    H, W = image.shape[:2]
    template_size = template.size
    # Template deviations from its own mean, reused for every window.
    template_distance = template - (template.sum() / template_size)
    corr_num = numpy.zeros((H, W), numpy.float64)
    corr_denum = numpy.zeros((H, W), numpy.float64)
    # Fixed: `xrange` and the bare `print y` debug statement were Python 2
    # only (the print was also stray debug output) -- removed for Py3.
    for y in range(H):
        for x in range(W):
            window = image[y:y + h, x:x + w]
            window_distance = window - (window.sum() / template_size)
            wh, ww = window_distance.shape[:2]
            corr_num[y, x] = (
                window_distance * template_distance[:wh, :ww]
            ).sum()
            corr_denum[y, x] = numpy.sqrt(
                (template_distance ** 2).sum() *
                (window_distance ** 2).sum()
            )
    return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/vision.py
|
overlapped_convolution
|
python
|
def overlapped_convolution(bin_template, bin_image,
tollerance=0.5, splits=(4, 2)):
"""
As each of these images are hold only binary values, and RFFT2 works on
float64 greyscale values, we can make the convolution more efficient by
breaking the image up into :splits: sectons. Each one of these sections
then has its greyscale value adjusted and then stacked.
We then apply the convolution to this 'stack' of images, and adjust the
resultant position matches.
"""
th, tw = bin_template.shape
ih, iw = bin_image.shape
hs, ws = splits
h = ih // hs
w = iw // ws
count = numpy.count_nonzero(bin_template)
assert count > 0
assert h >= th
assert w >= tw
yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
# image_stacks is Origin (x,y), array, z (height in stack)
image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
for num, (x1, x2, y1, y2) in
enumerate((x1, x2, y1, y2) for (x1, x2)
in xoffset for (y1, y2) in yoffset)]
pad_h = max(i.shape[0] for _, i, _ in image_stacks)
pad_w = max(i.shape[1] for _, i, _ in image_stacks)
# rfft metrics must be an even size - why ... maths?
pad_w += pad_w % 2
pad_h += pad_h % 2
overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
* num for _, i, num in image_stacks)
#print "Overlap splits %r, Image Size (%d,%d),
#Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
# Calculate the convolution of the FFT's of the overlapped image & template
convolution_freqs = (rfft2(overlapped_image) *
rfft2(bin_template[::-1, ::-1],
overlapped_image.shape))
# Reverse the FFT to find the result overlapped image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
results = set()
for (x, y), _, num in image_stacks[::-1]:
test = convolution_image / num
filtered = ((test >= (count - tollerance)) &
(test <= (count + tollerance)))
match_points = numpy.transpose(numpy.nonzero(filtered)) # bottom right
for (fy, fx) in match_points:
if fx < (tw - 1) or fy < (th - 1):
continue
results.add((x + fx - (tw - 1), y + fy - (th - 1)))
convolution_image %= num
return list(results)
|
As each of these images are hold only binary values, and RFFT2 works on
float64 greyscale values, we can make the convolution more efficient by
breaking the image up into :splits: sectons. Each one of these sections
then has its greyscale value adjusted and then stacked.
We then apply the convolution to this 'stack' of images, and adjust the
resultant position matches.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L138-L201
| null |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
x, y, w, h = rect
return image[y:y + h, x:x + w]
def pad_bin_image_to_shape(image, shape):
"""
Padd image to size :shape: with zeros
"""
h, w = shape
ih, iw = image.shape
assert ih <= h
assert iw <= w
if iw < w:
result = numpy.hstack((image, numpy.zeros((ih, w - iw), bool)))
else:
result = image
if ih < h:
result = numpy.vstack((result, numpy.zeros((h - ih, w), bool)))
return result
def sum_2d_images(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result += image
return result
OVERLAP_TABLE = {
16: (16, 8, 4, 2, 1),
15: (15, 5, 3, 1),
12: (12, 6, 4, 3, 2, 1),
10: (10, 5, 2, 1),
9: (9, 3, 1),
8: (8, 4, 2, 1),
6: (6, 3, 2, 1),
5: (5, 1),
4: (4, 2, 1),
3: (3, 1),
2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
tollerance=0.5, overlap_table=OVERLAP_TABLE):
"""
Selects and applies the best convolution method to find template in image.
Returns a list of matches in (width, height, x offset, y offset)
format (where the x and y offsets are from the top left corner).
As the images are binary images, we can utilise the extra bit space in the
float64's by cutting the image into tiles and stacking them into variable
grayscale values.
This allows converting a sparse binary image into a dense(r) grayscale one.
"""
template_sum = numpy.count_nonzero(bin_template)
th, tw = bin_template.shape
ih, iw = bin_image.shape
if template_sum == 0 or th == 0 or tw == 0:
# If we don't have a template
return []
if th > ih or tw > iw:
# If the template is bigger than the image
return []
# How many cells can we split the image into?
max_vert_cells = ih // th
max_hor_cells = iw // th
# Try to work out how many times we can stack the image
usable_factors = {n: factors for n, factors in overlap_table.iteritems()
if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
overlap_options = [(factor, n // factor)
for n, factors in usable_factors.iteritems()
for factor in factors
if (factor <= max_vert_cells and
n // factor <= max_hor_cells)]
if not overlap_options:
# We can't stack the image
return convolution(bin_template, bin_image, tollerance=tollerance)
best_overlap = min(overlap_options,
key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
return overlapped_convolution(bin_template, bin_image,
tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
expected = numpy.count_nonzero(bin_template)
ih, iw = bin_image.shape
th, tw = bin_template.shape
# Padd image to even dimensions
if ih % 2 or iw % 2:
if ih % 2:
ih += 1
if iw % 2:
iw += 1
bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
if expected == 0:
return []
# Calculate the convolution of the FFT's of the image & template
convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
bin_image.shape)
# Reverse the FFT to find the result image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
# The areas in the result image within expected +- tollerance are where we
# saw matches
found_bitmap = ((convolution_image > (expected - tollerance)) &
(convolution_image < (expected + tollerance)))
match_points = numpy.transpose(numpy.nonzero(found_bitmap)) # bottom right
# Find the top left point from the template (remember match_point is
# inside the template (hence -1)
return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
def overlapped_convolution(bin_template, bin_image,
tollerance=0.5, splits=(4, 2)):
"""
As each of these images are hold only binary values, and RFFT2 works on
float64 greyscale values, we can make the convolution more efficient by
breaking the image up into :splits: sectons. Each one of these sections
then has its greyscale value adjusted and then stacked.
We then apply the convolution to this 'stack' of images, and adjust the
resultant position matches.
"""
th, tw = bin_template.shape
ih, iw = bin_image.shape
hs, ws = splits
h = ih // hs
w = iw // ws
count = numpy.count_nonzero(bin_template)
assert count > 0
assert h >= th
assert w >= tw
yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
# image_stacks is Origin (x,y), array, z (height in stack)
image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
for num, (x1, x2, y1, y2) in
enumerate((x1, x2, y1, y2) for (x1, x2)
in xoffset for (y1, y2) in yoffset)]
pad_h = max(i.shape[0] for _, i, _ in image_stacks)
pad_w = max(i.shape[1] for _, i, _ in image_stacks)
# rfft metrics must be an even size - why ... maths?
pad_w += pad_w % 2
pad_h += pad_h % 2
overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
* num for _, i, num in image_stacks)
#print "Overlap splits %r, Image Size (%d,%d),
#Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
# Calculate the convolution of the FFT's of the overlapped image & template
convolution_freqs = (rfft2(overlapped_image) *
rfft2(bin_template[::-1, ::-1],
overlapped_image.shape))
# Reverse the FFT to find the result overlapped image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
results = set()
for (x, y), _, num in image_stacks[::-1]:
test = convolution_image / num
filtered = ((test >= (count - tollerance)) &
(test <= (count + tollerance)))
match_points = numpy.transpose(numpy.nonzero(filtered)) # bottom right
for (fy, fx) in match_points:
if fx < (tw - 1) or fy < (th - 1):
continue
results.add((x + fx - (tw - 1), y + fy - (th - 1)))
convolution_image %= num
return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
tollerance=0.5, rescale=10):
result = []
h, w = bin_template.shape[:2]
rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
bin_template, bin_image, tollerance))
ih, iw = bin_image.shape[:2]
# areas of interest
aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
for x, y, w, h in rects:
aoi[y // rescale:(y + h) // rescale + 1,
x // rescale:(x + w) // rescale + 1] = True
bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
if bp:
bp = prune_unbeneficial_partitions(aoi, bp)
return result
def binary_partition_to_rects(bp, image, template_w, template_h,
xoffset=0, yoffset=0):
h, w = image.shape[2:]
if bp is None:
return [(xoffset, yoffset, w, h)]
pos, axis, (p1, p2) = bp
if axis == 0:
new_xoffset, new_yoffset = xoffset, yoffset + pos
i1, i2 = image[pos:], image[:pos]
else:
new_xoffset, new_yoffset = xoffset + pos, yoffset
i1, i2 = image[:, pos:], image[:, :pos]
def prune_unbeneficial_partitions(image, bp):
pos, axis, (p1, p2) = bp
if axis == 0:
i1, i2 = image[pos:], image[:pos]
else:
i1, i2 = image[:, pos:], image[:, :pos]
if p1 is None:
p1_result = numpy.count_nonzero(i1) == 0
else:
p1_result = prune_unbeneficial_partitions(i1, p1)
if p2 is None:
p2_result = numpy.count_nonzero(i2) == 0
else:
p2_result = prune_unbeneficial_partitions(i2, p2)
if p1_result or p2_result:
return [pos,
axis,
[None if p1_result in [True, False] else p1_result,
None if p2_result in [True, False] else p2_result]]
else:
return None
def get_partition_scores(image, min_w=1, min_h=1):
"""Return list of best to worst binary splits along the x and y axis.
"""
h, w = image.shape[:2]
if w == 0 or h == 0:
return []
area = h * w
cnz = numpy.count_nonzero
total = cnz(image)
if total == 0 or area == total:
return []
if h < min_h * 2:
y_c = []
else:
y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))),
y, 0)
for count, y in ((cnz(image[y:]), y)
for y in range(min_h, image.shape[0] - min_h))]
if w < min_w * 2:
x_c = []
else:
x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))),
x, 1)
for count, x in ((cnz(image[:, x:]), x)
for x in range(min_w, image.shape[1] - min_w))]
return sorted(x_c + y_c)
def get_best_partition(image, min_w=1, min_h=1):
partitions = get_partition_scores(image, min_w=min_w, min_h=min_h)
if partitions:
return partitions[0][-2:]
else:
return None
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
"""Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf
nodes == None.
If max_depth < 0 this function will continue until all leaf nodes have
been found, if it is >= 0 leaf nodes will be created at that depth.
min_w and min_h are the minimum width or height of a partition.
"""
if max_depth >= 0 and depth >= max_depth:
return None
partition = get_best_partition(image, min_w=min_w, min_h=min_h)
if partition is None:
return None
pos, axis = partition
if axis == 0:
p1 = binary_partition_image(
image[pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:pos], min_w, min_h, depth + 1, max_depth)
elif axis == 1:
p1 = binary_partition_image(
image[:, pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:, :pos], min_w, min_h, depth + 1, max_depth)
return [pos, axis, [p1, p2]]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
if res_image is None:
res_image = numpy.zeros(image.shape)
if counter is None:
counter = itertools.count(15)
pos, axis, (p1, p2) = subdiv
if axis == 0:
s1, s2 = res_image[pos:], res_image[:pos]
i1, i2 = image[pos:], image[:pos]
else:
s1, s2 = res_image[:, pos:], res_image[:, :pos]
i1, i2 = image[:, pos:], image[:, :pos]
if p1 is None:
if numpy.count_nonzero(i1):
s1[:] = next(counter)
else:
draw_binary_partition(i1, p1, s1, counter)
if p2 is None:
if numpy.count_nonzero(i2):
s2[:] = next(counter)
else:
draw_binary_partition(i2, p2, s2, counter)
return res_image
def rescale2avg(image):
sub1 = image[:-1:2, :-1:2]
sub2 = image[1::2, 1::2]
res = numpy.zeros(sub1.shape, numpy.uint32)
res += sub1
res += sub2
res /= 2
return res.astype(numpy.uint8)
def rescale2max(image):
sub1 = image[:-1:2, :-1:2]
sub2 = image[1::2, 1::2]
if len(image.shape) == 3:
max_map = grey_scale(sub1) > grey_scale(sub2)
else:
max_map = sub1 > sub2
inv_max_map = max_map == False
res = numpy.zeros(sub1.shape, numpy.uint8)
res[max_map] = sub1[max_map]
res[inv_max_map] = sub2[inv_max_map]
return res
def rescale3avg(image):
sub1 = image[:-2:3, :-2:3]
sub2 = image[1:-1:3, 1:-1:3]
sub3 = image[2::3, 2::3]
res = numpy.zeros(sub1.shape, numpy.uint32)
res += sub1
res += sub2
res += sub3
res /= 3
return res.astype(numpy.uint8)
def rescale3max(image):
sub1 = image[:-2:3, :-2:3]
sub2 = image[1:-1:3, 1:-1:3]
sub3 = image[2::3, 2::3]
if len(image.shape) == 3:
grey1 = grey_scale(sub1)
grey2 = grey_scale(sub2)
grey3 = grey_scale(sub3)
else:
grey1, grey2, grey3 = sub1, sub2, sub3
max_map_1 = (grey1 > grey2) & (grey1 > grey3)
max_map_2 = (grey2 > grey1) & (grey2 > grey3)
max_map_3 = (max_map_1 | max_map_2) == False
res = numpy.zeros(sub1.shape, numpy.uint8)
res[max_map_1] = sub1[max_map_1]
res[max_map_2] = sub2[max_map_2]
res[max_map_3] = sub3[max_map_3]
return res
def or_reduce_rescale3max_offset(image):
return reduce(operator.or_,
(rescale3max(image[y1:y2, x1:x2]) for (y1, y2), (x1, x2)
in itertools.product(
*[[(i, -(3 - i)) for i in range(3)]] * 2)
)
)
def numpy_or_all(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result |= image
return result
def grey_scale(image):
"""Converts RGB image to Greyscale
:param image: input image
:type image: 3 channel 3d :class:`numpy.ndarray`
:rtype: :class:`numpy.ndarray`
"""
return image.astype(numpy.int32).sum(axis=2) // 3
def find_edges(image):
base = image[:, :, numpy.newaxis].astype(numpy.int16)
c = base[1:-1, 1:-1]
e = base[1:-1, :-2]
w = base[1:-1, 2:]
s = base[:-2, 1:-1]
n = base[2:, 1:-1]
diffs = numpy.concatenate([c-e, c-w, c-s, c-n], axis=2).max(axis=2)
diffs[diffs < 0] = 0
dh, dw = diffs.shape
col = numpy.zeros((dh, 1), numpy.uint8)
row = numpy.zeros((1, dw+2), numpy.uint8)
return numpy.vstack(
(row, numpy.hstack((col, diffs.astype(numpy.uint8), col)), row)
)
def find_threshold_near_density(img, density, low=0, high=255):
"""Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param migh: max threshold to test
:type low: ubyte
:rtype: ubyte
"""
size = numpy.size(img)
densities = []
last_t = None
while True:
t = ((high - low) // 2) + low
if t == last_t:
densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
return densities[0][1]
else:
last_t = t
d = numpy.count_nonzero(img > t) / size
densities.append((d, t))
if d < density:
high = t
elif d >= density: # search away from low
low = t
def filter_greys_using_image(image, target):
"""Filter out any values in target not in image
:param image: image containing values to appear in filtered image
:param target: the image to filter
:rtype: 2d :class:`numpy.ndarray` containing only value in image
and with the same dimensions as target
"""
maskbase = numpy.array(range(256), dtype=numpy.uint8)
mask = numpy.where(numpy.in1d(maskbase, numpy.unique(image)), maskbase, 0)
return mask[target]
def correlation_coefficient_normed(template, image):
h, w = template.shape[:2]
H, W = image.shape[:2]
template_size = template.size
template_distance = template - (template.sum() / template_size)
corr_num = numpy.zeros((H, W), numpy.float64)
corr_denum = numpy.zeros((H, W), numpy.float64)
for y in xrange(H):
for x in xrange(W):
image_in_template_area = image[y:y + h, x:x + w]
image_distance_of_template_area = (
image_in_template_area - (
image_in_template_area.sum() /
template_size
)
)
I_H, I_W = image_distance_of_template_area.shape[:2]
sum_of_template_by_image_distance_at_xy = (
image_distance_of_template_area * template_distance[:I_H, :I_W]
).sum()
corr_num[y, x] = sum_of_template_by_image_distance_at_xy
corr_denum[y, x] = numpy.sqrt(
(template_distance ** 2).sum() *
(image_distance_of_template_area ** 2).sum()
)
print y
return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/vision.py
|
get_partition_scores
|
python
|
def get_partition_scores(image, min_w=1, min_h=1):
"""Return list of best to worst binary splits along the x and y axis.
"""
h, w = image.shape[:2]
if w == 0 or h == 0:
return []
area = h * w
cnz = numpy.count_nonzero
total = cnz(image)
if total == 0 or area == total:
return []
if h < min_h * 2:
y_c = []
else:
y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))),
y, 0)
for count, y in ((cnz(image[y:]), y)
for y in range(min_h, image.shape[0] - min_h))]
if w < min_w * 2:
x_c = []
else:
x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))),
x, 1)
for count, x in ((cnz(image[:, x:]), x)
for x in range(min_w, image.shape[1] - min_w))]
return sorted(x_c + y_c)
|
Return list of best to worst binary splits along the x and y axis.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L259-L285
| null |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
    """Return the view of *image* covered by ``rect`` = (x, y, w, h)."""
    left, top, width, height = rect
    return image[top:top + height, left:left + width]
def pad_bin_image_to_shape(image, shape):
    """Zero-pad a binary image on the right/bottom up to ``shape`` (h, w).

    The input must already fit inside the target shape.
    """
    target_h, target_w = shape
    img_h, img_w = image.shape
    assert img_h <= target_h
    assert img_w <= target_w
    padded = image
    if img_w < target_w:
        right = numpy.zeros((img_h, target_w - img_w), bool)
        padded = numpy.hstack((padded, right))
    if img_h < target_h:
        bottom = numpy.zeros((target_h - img_h, target_w), bool)
        padded = numpy.vstack((padded, bottom))
    return padded
def sum_2d_images(images):
    """Element-wise sum of an iterable of equally-shaped 2-d arrays.

    The first array is copied (inputs are not mutated); the result keeps
    the first array's dtype.  Raises StopIteration on an empty iterable.
    """
    iterator = iter(images)
    total = numpy.copy(next(iterator))
    for img in iterator:
        total += img
    return total
# Maps a stack height n (how many image tiles can be encoded into one
# float64 greyscale image) to the factors of n that may be used as the
# vertical split count; each factor f yields a candidate split (f, n // f).
# Consumed by best_convolution when choosing overlapped_convolution splits.
OVERLAP_TABLE = {
    16: (16, 8, 4, 2, 1),
    15: (15, 5, 3, 1),
    12: (12, 6, 4, 3, 2, 1),
    10: (10, 5, 2, 1),
    9: (9, 3, 1),
    8: (8, 4, 2, 1),
    6: (6, 3, 2, 1),
    5: (5, 1),
    4: (4, 2, 1),
    3: (3, 1),
    2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
# (2 ** 41: float64 has a 52-bit mantissa; this leaves plenty of headroom.)
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
                     tollerance=0.5, overlap_table=OVERLAP_TABLE):
    """
    Selects and applies the best convolution method to find template in image.
    Returns a list of (x offset, y offset) match positions (offsets are
    from the top left corner).
    As the images are binary images, we can utilise the extra bit space in the
    float64's by cutting the image into tiles and stacking them into variable
    grayscale values.
    This allows converting a sparse binary image into a dense(r) grayscale one.
    """
    template_sum = numpy.count_nonzero(bin_template)
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    if template_sum == 0 or th == 0 or tw == 0:
        # If we don't have a template
        return []
    if th > ih or tw > iw:
        # If the template is bigger than the image
        return []
    # How many cells can we split the image into?
    max_vert_cells = ih // th
    # BUG FIX: horizontal capacity is image width over template *width*
    # (was ``iw // th``, which for non-square templates could pick a split
    # violating overlapped_convolution's ``w >= tw`` assertion).
    max_hor_cells = iw // tw
    # Stack heights n whose encoded greyscale values stay within float64
    # precision.  (.items() instead of .iteritems() keeps Py3 compatibility.)
    usable_factors = {n: factors for n, factors in overlap_table.items()
                      if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
    overlap_options = [(factor, n // factor)
                       for n, factors in usable_factors.items()
                       for factor in factors
                       if (factor <= max_vert_cells and
                           n // factor <= max_hor_cells)]
    if not overlap_options:
        # We can't stack the image
        return convolution(bin_template, bin_image, tollerance=tollerance)
    # Prefer the split that minimises the padded stacked-image area.
    best_overlap = min(overlap_options,
                       key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
    return overlapped_convolution(bin_template, bin_image,
                                  tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
    """Locate ``bin_template`` inside ``bin_image`` via FFT convolution.

    Both arguments are 2-d binary arrays.  Returns a list of (x, y)
    top-left positions where the number of overlapping True pixels is
    within ``tollerance`` of the template's total True count.
    NOTE(review): positions can be negative for wrap-around matches near
    the image border -- confirm callers filter these if needed.
    """
    # A perfect match must reproduce this many set pixels.
    expected = numpy.count_nonzero(bin_template)
    ih, iw = bin_image.shape
    th, tw = bin_template.shape
    # Padd image to even dimensions
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
    if expected == 0:
        return []
    # Calculate the convolution of the FFT's of the image & template
    # (the template is flipped so the convolution acts as a correlation).
    convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
                                                 bin_image.shape)
    # Reverse the FFT to find the result image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    # The areas in the result image within expected +- tollerance are where we
    # saw matches
    found_bitmap = ((convolution_image > (expected - tollerance)) &
                    (convolution_image < (expected + tollerance)))
    match_points = numpy.transpose(numpy.nonzero(found_bitmap))  # bottom right
    # Find the top left point from the template (remember match_point is
    # inside the template (hence -1)
    return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
def overlapped_convolution(bin_template, bin_image,
                           tollerance=0.5, splits=(4, 2)):
    """
    As each of these images are hold only binary values, and RFFT2 works on
    float64 greyscale values, we can make the convolution more efficient by
    breaking the image up into :splits: sectons. Each one of these sections
    then has its greyscale value adjusted and then stacked.
    We then apply the convolution to this 'stack' of images, and adjust the
    resultant position matches.

    Returns a list of (x, y) top-left match positions.
    """
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    hs, ws = splits
    h = ih // hs
    w = iw // ws
    count = numpy.count_nonzero(bin_template)
    assert count > 0
    # Each tile must be at least as big as the template.
    assert h >= th
    assert w >= tw
    # Tile bounds overlap by (template - 1) so border matches are not lost.
    yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
    xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
    # image_stacks is Origin (x,y), array, z (height in stack)
    # Tile num gets weight (count + 1) ** num, so per-tile scores occupy
    # disjoint "digit" ranges in base (count + 1) and can be peeled apart
    # below with division and modulo.
    image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
                    for num, (x1, x2, y1, y2) in
                    enumerate((x1, x2, y1, y2) for (x1, x2)
                              in xoffset for (y1, y2) in yoffset)]
    pad_h = max(i.shape[0] for _, i, _ in image_stacks)
    pad_w = max(i.shape[1] for _, i, _ in image_stacks)
    # rfft metrics must be an even size - why ... maths?
    pad_w += pad_w % 2
    pad_h += pad_h % 2
    # Weighted sum collapses all tiles into a single greyscale image.
    overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
                                     * num for _, i, num in image_stacks)
    #print "Overlap splits %r, Image Size (%d,%d),
    #Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
    # Calculate the convolution of the FFT's of the overlapped image & template
    convolution_freqs = (rfft2(overlapped_image) *
                         rfft2(bin_template[::-1, ::-1],
                               overlapped_image.shape))
    # Reverse the FFT to find the result overlapped image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    results = set()
    # Decode tiles from highest weight down: divide to read this tile's
    # score digit, then modulo to strip it before the next iteration.
    for (x, y), _, num in image_stacks[::-1]:
        test = convolution_image / num
        filtered = ((test >= (count - tollerance)) &
                    (test <= (count + tollerance)))
        match_points = numpy.transpose(numpy.nonzero(filtered)) # bottom right
        for (fy, fx) in match_points:
            # Skip wrap-around hits that fall before the template fits.
            if fx < (tw - 1) or fy < (th - 1):
                continue
            results.add((x + fx - (tw - 1), y + fy - (th - 1)))
        convolution_image %= num
    return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
                                     tollerance=0.5, rescale=10):
    """Coarse areas-of-interest pass intended to precede a full search.

    NOTE(review): this function looks unfinished -- it calls
    ``convolution_r3m_targets``, which is not defined in this module
    (NameError at runtime), and ``result`` is never appended to, so it
    would always return [].  Confirm intent before relying on it.
    """
    result = []
    h, w = bin_template.shape[:2]
    rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
        bin_template, bin_image, tollerance))
    ih, iw = bin_image.shape[:2]
    # areas of interest
    # Downscaled boolean mask marking every cell a candidate rect touches.
    aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
    for x, y, w, h in rects:
        aoi[y // rescale:(y + h) // rescale + 1,
            x // rescale:(x + w) // rescale + 1] = True
    # Partition the mask, then drop splits that isolate no empty region.
    bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
    if bp:
        bp = prune_unbeneficial_partitions(aoi, bp)
    return result
def binary_partition_to_rects(bp, image, template_w, template_h,
                              xoffset=0, yoffset=0):
    """Flatten a bsp tree (see binary_partition_image) into leaf rectangles.

    Returns a list of (x, y, w, h) rectangles, one per leaf partition,
    with coordinates offset into the original image.

    BUG FIX / NOTE(review): the original read ``image.shape[2:]`` (empty
    for a 2-d image, so the unpack raised) and fell off the end without
    returning for non-leaf nodes.  The recursion below mirrors the
    p1 <-> image[pos:] pairing used by binary_partition_image and
    draw_binary_partition -- confirm against intended semantics.
    """
    h, w = image.shape[:2]
    if bp is None:
        # Leaf: the whole remaining region is one rectangle.
        return [(xoffset, yoffset, w, h)]
    pos, axis, (p1, p2) = bp
    if axis == 0:
        # Horizontal cut: p1 covers rows pos.., p2 covers rows ..pos.
        new_xoffset, new_yoffset = xoffset, yoffset + pos
        i1, i2 = image[pos:], image[:pos]
    else:
        # Vertical cut: p1 covers cols pos.., p2 covers cols ..pos.
        new_xoffset, new_yoffset = xoffset + pos, yoffset
        i1, i2 = image[:, pos:], image[:, :pos]
    return (binary_partition_to_rects(p1, i1, template_w, template_h,
                                      new_xoffset, new_yoffset) +
            binary_partition_to_rects(p2, i2, template_w, template_h,
                                      xoffset, yoffset))
def prune_unbeneficial_partitions(image, bp):
    """Drop bsp subtrees whose half of ``image`` contains no nonzero pixels.

    Returns a pruned ``[pos, axis, [p1, p2]]`` node, or None when neither
    half isolates an empty region (i.e. the split buys nothing).
    NOTE(review): leaf recursion results are booleans while subtree results
    are lists; the ``in [True, False]`` checks below distinguish the two
    (a list never compares equal to a bool).
    """
    pos, axis, (p1, p2) = bp
    # Slice pairing mirrors binary_partition_image: p1 <-> [pos:] half.
    if axis == 0:
        i1, i2 = image[pos:], image[:pos]
    else:
        i1, i2 = image[:, pos:], image[:, :pos]
    # For a leaf, "beneficial" means its half of the image is empty.
    if p1 is None:
        p1_result = numpy.count_nonzero(i1) == 0
    else:
        p1_result = prune_unbeneficial_partitions(i1, p1)
    if p2 is None:
        p2_result = numpy.count_nonzero(i2) == 0
    else:
        p2_result = prune_unbeneficial_partitions(i2, p2)
    if p1_result or p2_result:
        # Keep the split; collapse boolean leaf markers back into None leaves.
        return [pos,
                axis,
                [None if p1_result in [True, False] else p1_result,
                 None if p2_result in [True, False] else p2_result]]
    else:
        return None
def get_partition_scores(image, min_w=1, min_h=1):
    """Score every legal binary split of ``image`` along both axes.

    Each candidate split at ``pos`` is scored by minus the absolute
    difference in nonzero density between its two halves, so the most
    unbalanced (best) splits sort first.  Returns a sorted list of
    ``(score, pos, axis)`` tuples (axis 0 = horizontal cut, 1 = vertical
    cut), or [] when the image is empty, entirely nonzero, or too small
    to split given ``min_w``/``min_h``.
    """
    h, w = image.shape[:2]
    if w == 0 or h == 0:
        return []
    area = h * w
    cnz = numpy.count_nonzero
    total = cnz(image)
    if total == 0 or area == total:
        return []
    horizontal = []
    if h >= min_h * 2:
        for y in range(min_h, h - min_h):
            below = cnz(image[y:])
            score = -abs((below / ((h - y) * w)) -
                         ((total - below) / (y * w)))
            horizontal.append((score, y, 0))
    vertical = []
    if w >= min_w * 2:
        for x in range(min_w, w - min_w):
            right = cnz(image[:, x:])
            score = -abs((right / (h * (w - x))) -
                         ((total - right) / (h * x)))
            vertical.append((score, x, 1))
    return sorted(vertical + horizontal)
def get_best_partition(image, min_w=1, min_h=1):
    """Return (pos, axis) of the best-scoring split, or None if unsplittable."""
    ranked = get_partition_scores(image, min_w=min_w, min_h=min_h)
    return ranked[0][-2:] if ranked else None
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
    """Build a binary-space-partition tree over a binary image.

    Nodes are ``[pos, axis, [first_child, second_child]]`` and leaves are
    ``None``.  With ``max_depth < 0`` the recursion runs until no
    beneficial split remains; otherwise it stops at that depth.
    ``min_w`` and ``min_h`` bound the smallest partition dimensions.
    """
    if max_depth >= 0 and depth >= max_depth:
        return None
    split = get_best_partition(image, min_w=min_w, min_h=min_h)
    if split is None:
        return None
    pos, axis = split
    if axis == 0:
        halves = (image[pos:], image[:pos])
    else:
        halves = (image[:, pos:], image[:, :pos])
    children = [binary_partition_image(half, min_w, min_h,
                                       depth + 1, max_depth)
                for half in halves]
    return [pos, axis, children]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
    """Render a bsp tree (see binary_partition_image) as a label image.

    Each leaf partition that contains any nonzero pixels is filled with a
    fresh value drawn from ``counter``; mutates and returns ``res_image``.
    """
    if res_image is None:
        res_image = numpy.zeros(image.shape)
    if counter is None:
        counter = itertools.count(15)  # starts at 15; presumably so labels
                                       # are visually distinct from 0 -- confirm
    pos, axis, (p1, p2) = subdiv
    # Slice pairing mirrors binary_partition_image: p1 <-> [pos:] half.
    if axis == 0:
        s1, s2 = res_image[pos:], res_image[:pos]
        i1, i2 = image[pos:], image[:pos]
    else:
        s1, s2 = res_image[:, pos:], res_image[:, :pos]
        i1, i2 = image[:, pos:], image[:, :pos]
    if p1 is None:
        # Leaf: label it only if it actually contains content.
        if numpy.count_nonzero(i1):
            s1[:] = next(counter)
    else:
        draw_binary_partition(i1, p1, s1, counter)
    if p2 is None:
        if numpy.count_nonzero(i2):
            s2[:] = next(counter)
    else:
        draw_binary_partition(i2, p2, s2, counter)
    return res_image
def rescale2avg(image):
    """Downscale by 2, averaging two diagonal samples per output cell.

    Accumulates in uint32 to avoid overflow, halves, and returns uint8.
    """
    sub1 = image[:-1:2, :-1:2]
    sub2 = image[1::2, 1::2]
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    # BUG FIX: was ``res /= 2`` -- under ``from __future__ import division``
    # that is an in-place *true* division into an integer array, which
    # raises a casting TypeError on modern NumPy.  Floor division keeps the
    # intended integer-average behaviour.
    res //= 2
    return res.astype(numpy.uint8)
def rescale2max(image):
    """Downscale by 2, keeping the brighter of two diagonal samples per cell.

    3-channel inputs are compared by grey level; output dtype is uint8.
    """
    a = image[:-1:2, :-1:2]
    b = image[1::2, 1::2]
    if len(image.shape) == 3:
        pick_a = grey_scale(a) > grey_scale(b)
    else:
        pick_a = a > b
    pick_b = pick_a == False
    out = numpy.zeros(a.shape, numpy.uint8)
    out[pick_a] = a[pick_a]
    out[pick_b] = b[pick_b]
    return out
def rescale3avg(image):
    """Downscale by 3, averaging three diagonal samples per output cell.

    Accumulates in uint32 to avoid overflow, divides by 3, returns uint8.
    """
    sub1 = image[:-2:3, :-2:3]
    sub2 = image[1:-1:3, 1:-1:3]
    sub3 = image[2::3, 2::3]
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    res += sub3
    # BUG FIX: was ``res /= 3`` -- under ``from __future__ import division``
    # that is an in-place *true* division into an integer array, which
    # raises a casting TypeError on modern NumPy.  Floor division keeps the
    # intended integer-average behaviour.
    res //= 3
    return res.astype(numpy.uint8)
def rescale3max(image):
    """Downscale by 3, keeping the brightest of three diagonal samples.

    3-channel inputs are compared by grey level; output dtype is uint8.
    """
    s1 = image[:-2:3, :-2:3]
    s2 = image[1:-1:3, 1:-1:3]
    s3 = image[2::3, 2::3]
    if len(image.shape) == 3:
        g1, g2, g3 = grey_scale(s1), grey_scale(s2), grey_scale(s3)
    else:
        g1, g2, g3 = s1, s2, s3
    m1 = (g1 > g2) & (g1 > g3)
    m2 = (g2 > g1) & (g2 > g3)
    m3 = (m1 | m2) == False
    out = numpy.zeros(s1.shape, numpy.uint8)
    out[m1] = s1[m1]
    out[m2] = s2[m2]
    out[m3] = s3[m3]
    return out
def or_reduce_rescale3max_offset(image):
    """OR together rescale3max results over all 9 (row, col) crop offsets.

    Each offset pair trims ``i`` pixels from one edge and ``3 - i`` from
    the other before downscaling, so features are kept regardless of
    their alignment to the 3x3 sampling grid.
    """
    # BUG FIX: ``reduce`` is a builtin only on Python 2; importing it from
    # functools (available since 2.6) keeps this working on Python 3.
    from functools import reduce
    offsets = [(i, -(3 - i)) for i in range(3)]
    return reduce(operator.or_,
                  (rescale3max(image[y1:y2, x1:x2])
                   for (y1, y2), (x1, x2)
                   in itertools.product(offsets, offsets)))
def numpy_or_all(images):
    """Bitwise-OR every array in ``images`` together.

    The first array is copied, so inputs are never mutated.  Raises
    StopIteration on an empty iterable.
    """
    stream = iter(images)
    combined = numpy.copy(next(stream))
    for img in stream:
        combined |= img
    return combined
def grey_scale(image):
    """Converts an RGB image to greyscale by integer-averaging its channels.

    :param image: input image
    :type image: 3 channel 3d :class:`numpy.ndarray`
    :rtype: :class:`numpy.ndarray` (int32, channel axis collapsed)
    """
    channel_total = numpy.sum(image.astype(numpy.int32), axis=2)
    return channel_total // 3
def find_edges(image):
    """Edge-strength map of a 2-d greyscale image.

    Each interior pixel becomes the largest positive difference between
    itself and its four axis-aligned neighbours (negative differences are
    clamped to 0); a one-pixel zero border restores the input's shape.
    Returns a uint8 array with the same shape as ``image``.
    """
    # int16 with a trailing channel axis so the subtractions cannot wrap.
    base = image[:, :, numpy.newaxis].astype(numpy.int16)
    # Centre plus four shifted neighbour views.  NOTE(review): the e/w and
    # s/n names look swapped relative to the usual image axes -- harmless,
    # as the max below is symmetric over all four.
    c = base[1:-1, 1:-1]
    e = base[1:-1, :-2]
    w = base[1:-1, 2:]
    s = base[:-2, 1:-1]
    n = base[2:, 1:-1]
    # Per-pixel maximum neighbour difference, clamped at zero.
    diffs = numpy.concatenate([c-e, c-w, c-s, c-n], axis=2).max(axis=2)
    diffs[diffs < 0] = 0
    dh, dw = diffs.shape
    # Re-add the one-pixel border trimmed by the neighbour slicing above.
    col = numpy.zeros((dh, 1), numpy.uint8)
    row = numpy.zeros((1, dw+2), numpy.uint8)
    return numpy.vstack(
        (row, numpy.hstack((col, diffs.astype(numpy.uint8), col)), row)
    )
def find_threshold_near_density(img, density, low=0, high=255):
    """Find a threshold where the fraction of pixels above the threshold
    is closest to density where density is (count of pixels above
    threshold / count of pixels).
    The highest threshold closest to the desired density will be returned.
    Use low and high to exclude undesirable thresholds.
    :param img: target image
    :type img: 2d :class:`numpy.ndarray`
    :param density: target density
    :type density: float between 0.0 and 1.0
    :param low: min threshold to test
    :type low: ubyte
    :param high: max threshold to test
    :type high: ubyte
    :rtype: ubyte
    """
    size = numpy.size(img)
    densities = []
    last_t = None
    # Binary search over thresholds; every probe is recorded so the final
    # pick can fall back on the closest density actually observed.
    while True:
        t = ((high - low) // 2) + low
        if t == last_t:
            # Converged: pick the probe closest to the target density,
            # breaking ties in favour of the highest threshold (256 - t).
            densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
            return densities[0][1]
        else:
            last_t = t
            d = numpy.count_nonzero(img > t) / size
            densities.append((d, t))
            if d < density:
                # Too few pixels pass: lower the threshold ceiling.
                high = t
            elif d >= density:  # search away from low
                low = t
def filter_greys_using_image(image, target):
    """Zero out every grey level in ``target`` that never occurs in ``image``.

    :param image: image containing the values allowed to survive
    :param target: the image to filter
    :rtype: 2d :class:`numpy.ndarray` with the same shape as ``target``
    """
    # Build a 256-entry lookup table: identity for levels present in
    # ``image``, zero for everything else, then apply it to ``target``.
    levels = numpy.array(range(256), dtype=numpy.uint8)
    present = numpy.in1d(levels, numpy.unique(image))
    lut = numpy.where(present, levels, 0)
    return lut[target]
def correlation_coefficient_normed(template, image):
    """Normalised cross-correlation of ``template`` at every image position.

    Returns an (H, W) float64 array where entry (y, x) is the correlation
    between the template and the image window anchored at (x, y).  Windows
    that hang off the bottom/right edge are compared against the matching
    top-left slice of the template.  Positions whose window has zero
    variance divide by zero and yield nan/inf.

    BUG FIX: replaced Python-2-only ``xrange`` with ``range`` and removed
    the stray ``print y`` debug statement (a syntax error on Python 3).
    """
    h, w = template.shape[:2]
    H, W = image.shape[:2]
    template_size = template.size
    # Template deviation from its own mean, reused for every position.
    template_distance = template - (template.sum() / template_size)
    corr_num = numpy.zeros((H, W), numpy.float64)
    corr_denum = numpy.zeros((H, W), numpy.float64)
    for y in range(H):
        for x in range(W):
            window = image[y:y + h, x:x + w]
            # NOTE(review): the window mean divides by the full template
            # size even for truncated edge windows -- kept as-is to
            # preserve the original behaviour; confirm intent.
            window_distance = window - (window.sum() / template_size)
            win_h, win_w = window_distance.shape[:2]
            corr_num[y, x] = (
                window_distance * template_distance[:win_h, :win_w]
            ).sum()
            corr_denum[y, x] = numpy.sqrt(
                (template_distance ** 2).sum() *
                (window_distance ** 2).sum()
            )
    return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/vision.py
|
binary_partition_image
|
python
|
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
"""Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf
nodes == None.
If max_depth < 0 this function will continue until all leaf nodes have
been found, if it is >= 0 leaf nodes will be created at that depth.
min_w and min_h are the minimum width or height of a partition.
"""
if max_depth >= 0 and depth >= max_depth:
return None
partition = get_best_partition(image, min_w=min_w, min_h=min_h)
if partition is None:
return None
pos, axis = partition
if axis == 0:
p1 = binary_partition_image(
image[pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:pos], min_w, min_h, depth + 1, max_depth)
elif axis == 1:
p1 = binary_partition_image(
image[:, pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:, :pos], min_w, min_h, depth + 1, max_depth)
return [pos, axis, [p1, p2]]
|
Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf
nodes == None.
If max_depth < 0 this function will continue until all leaf nodes have
been found, if it is >= 0 leaf nodes will be created at that depth.
min_w and min_h are the minimum width or height of a partition.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L296-L322
|
[
"def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):\n \"\"\"Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf\n nodes == None.\n\n If max_depth < 0 this function will continue until all leaf nodes have\n been found, if it is >= 0 leaf nodes will be created at that depth.\n\n min_w and min_h are the minimum width or height of a partition.\n\n \"\"\"\n if max_depth >= 0 and depth >= max_depth:\n return None\n partition = get_best_partition(image, min_w=min_w, min_h=min_h)\n if partition is None:\n return None\n pos, axis = partition\n if axis == 0:\n p1 = binary_partition_image(\n image[pos:], min_w, min_h, depth + 1, max_depth)\n p2 = binary_partition_image(\n image[:pos], min_w, min_h, depth + 1, max_depth)\n elif axis == 1:\n p1 = binary_partition_image(\n image[:, pos:], min_w, min_h, depth + 1, max_depth)\n p2 = binary_partition_image(\n image[:, :pos], min_w, min_h, depth + 1, max_depth)\n return [pos, axis, [p1, p2]]\n",
"def get_best_partition(image, min_w=1, min_h=1):\n partitions = get_partition_scores(image, min_w=min_w, min_h=min_h)\n if partitions:\n return partitions[0][-2:]\n else:\n return None\n"
] |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
x, y, w, h = rect
return image[y:y + h, x:x + w]
def pad_bin_image_to_shape(image, shape):
"""
Padd image to size :shape: with zeros
"""
h, w = shape
ih, iw = image.shape
assert ih <= h
assert iw <= w
if iw < w:
result = numpy.hstack((image, numpy.zeros((ih, w - iw), bool)))
else:
result = image
if ih < h:
result = numpy.vstack((result, numpy.zeros((h - ih, w), bool)))
return result
def sum_2d_images(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result += image
return result
OVERLAP_TABLE = {
16: (16, 8, 4, 2, 1),
15: (15, 5, 3, 1),
12: (12, 6, 4, 3, 2, 1),
10: (10, 5, 2, 1),
9: (9, 3, 1),
8: (8, 4, 2, 1),
6: (6, 3, 2, 1),
5: (5, 1),
4: (4, 2, 1),
3: (3, 1),
2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
tollerance=0.5, overlap_table=OVERLAP_TABLE):
"""
Selects and applies the best convolution method to find template in image.
Returns a list of matches in (width, height, x offset, y offset)
format (where the x and y offsets are from the top left corner).
As the images are binary images, we can utilise the extra bit space in the
float64's by cutting the image into tiles and stacking them into variable
grayscale values.
This allows converting a sparse binary image into a dense(r) grayscale one.
"""
template_sum = numpy.count_nonzero(bin_template)
th, tw = bin_template.shape
ih, iw = bin_image.shape
if template_sum == 0 or th == 0 or tw == 0:
# If we don't have a template
return []
if th > ih or tw > iw:
# If the template is bigger than the image
return []
# How many cells can we split the image into?
max_vert_cells = ih // th
max_hor_cells = iw // th
# Try to work out how many times we can stack the image
usable_factors = {n: factors for n, factors in overlap_table.iteritems()
if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
overlap_options = [(factor, n // factor)
for n, factors in usable_factors.iteritems()
for factor in factors
if (factor <= max_vert_cells and
n // factor <= max_hor_cells)]
if not overlap_options:
# We can't stack the image
return convolution(bin_template, bin_image, tollerance=tollerance)
best_overlap = min(overlap_options,
key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
return overlapped_convolution(bin_template, bin_image,
tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
expected = numpy.count_nonzero(bin_template)
ih, iw = bin_image.shape
th, tw = bin_template.shape
# Padd image to even dimensions
if ih % 2 or iw % 2:
if ih % 2:
ih += 1
if iw % 2:
iw += 1
bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
if expected == 0:
return []
# Calculate the convolution of the FFT's of the image & template
convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
bin_image.shape)
# Reverse the FFT to find the result image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
# The areas in the result image within expected +- tollerance are where we
# saw matches
found_bitmap = ((convolution_image > (expected - tollerance)) &
(convolution_image < (expected + tollerance)))
match_points = numpy.transpose(numpy.nonzero(found_bitmap)) # bottom right
# Find the top left point from the template (remember match_point is
# inside the template (hence -1)
return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
def overlapped_convolution(bin_template, bin_image,
tollerance=0.5, splits=(4, 2)):
"""
As each of these images are hold only binary values, and RFFT2 works on
float64 greyscale values, we can make the convolution more efficient by
breaking the image up into :splits: sectons. Each one of these sections
then has its greyscale value adjusted and then stacked.
We then apply the convolution to this 'stack' of images, and adjust the
resultant position matches.
"""
th, tw = bin_template.shape
ih, iw = bin_image.shape
hs, ws = splits
h = ih // hs
w = iw // ws
count = numpy.count_nonzero(bin_template)
assert count > 0
assert h >= th
assert w >= tw
yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
# image_stacks is Origin (x,y), array, z (height in stack)
image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
for num, (x1, x2, y1, y2) in
enumerate((x1, x2, y1, y2) for (x1, x2)
in xoffset for (y1, y2) in yoffset)]
pad_h = max(i.shape[0] for _, i, _ in image_stacks)
pad_w = max(i.shape[1] for _, i, _ in image_stacks)
# rfft metrics must be an even size - why ... maths?
pad_w += pad_w % 2
pad_h += pad_h % 2
overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
* num for _, i, num in image_stacks)
#print "Overlap splits %r, Image Size (%d,%d),
#Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
# Calculate the convolution of the FFT's of the overlapped image & template
convolution_freqs = (rfft2(overlapped_image) *
rfft2(bin_template[::-1, ::-1],
overlapped_image.shape))
# Reverse the FFT to find the result overlapped image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
results = set()
for (x, y), _, num in image_stacks[::-1]:
test = convolution_image / num
filtered = ((test >= (count - tollerance)) &
(test <= (count + tollerance)))
match_points = numpy.transpose(numpy.nonzero(filtered)) # bottom right
for (fy, fx) in match_points:
if fx < (tw - 1) or fy < (th - 1):
continue
results.add((x + fx - (tw - 1), y + fy - (th - 1)))
convolution_image %= num
return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
tollerance=0.5, rescale=10):
result = []
h, w = bin_template.shape[:2]
rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
bin_template, bin_image, tollerance))
ih, iw = bin_image.shape[:2]
# areas of interest
aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
for x, y, w, h in rects:
aoi[y // rescale:(y + h) // rescale + 1,
x // rescale:(x + w) // rescale + 1] = True
bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
if bp:
bp = prune_unbeneficial_partitions(aoi, bp)
return result
def binary_partition_to_rects(bp, image, template_w, template_h,
xoffset=0, yoffset=0):
h, w = image.shape[2:]
if bp is None:
return [(xoffset, yoffset, w, h)]
pos, axis, (p1, p2) = bp
if axis == 0:
new_xoffset, new_yoffset = xoffset, yoffset + pos
i1, i2 = image[pos:], image[:pos]
else:
new_xoffset, new_yoffset = xoffset + pos, yoffset
i1, i2 = image[:, pos:], image[:, :pos]
def prune_unbeneficial_partitions(image, bp):
pos, axis, (p1, p2) = bp
if axis == 0:
i1, i2 = image[pos:], image[:pos]
else:
i1, i2 = image[:, pos:], image[:, :pos]
if p1 is None:
p1_result = numpy.count_nonzero(i1) == 0
else:
p1_result = prune_unbeneficial_partitions(i1, p1)
if p2 is None:
p2_result = numpy.count_nonzero(i2) == 0
else:
p2_result = prune_unbeneficial_partitions(i2, p2)
if p1_result or p2_result:
return [pos,
axis,
[None if p1_result in [True, False] else p1_result,
None if p2_result in [True, False] else p2_result]]
else:
return None
def get_partition_scores(image, min_w=1, min_h=1):
"""Return list of best to worst binary splits along the x and y axis.
"""
h, w = image.shape[:2]
if w == 0 or h == 0:
return []
area = h * w
cnz = numpy.count_nonzero
total = cnz(image)
if total == 0 or area == total:
return []
if h < min_h * 2:
y_c = []
else:
y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))),
y, 0)
for count, y in ((cnz(image[y:]), y)
for y in range(min_h, image.shape[0] - min_h))]
if w < min_w * 2:
x_c = []
else:
x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))),
x, 1)
for count, x in ((cnz(image[:, x:]), x)
for x in range(min_w, image.shape[1] - min_w))]
return sorted(x_c + y_c)
def get_best_partition(image, min_w=1, min_h=1):
partitions = get_partition_scores(image, min_w=min_w, min_h=min_h)
if partitions:
return partitions[0][-2:]
else:
return None
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
"""Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf
nodes == None.
If max_depth < 0 this function will continue until all leaf nodes have
been found, if it is >= 0 leaf nodes will be created at that depth.
min_w and min_h are the minimum width or height of a partition.
"""
if max_depth >= 0 and depth >= max_depth:
return None
partition = get_best_partition(image, min_w=min_w, min_h=min_h)
if partition is None:
return None
pos, axis = partition
if axis == 0:
p1 = binary_partition_image(
image[pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:pos], min_w, min_h, depth + 1, max_depth)
elif axis == 1:
p1 = binary_partition_image(
image[:, pos:], min_w, min_h, depth + 1, max_depth)
p2 = binary_partition_image(
image[:, :pos], min_w, min_h, depth + 1, max_depth)
return [pos, axis, [p1, p2]]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
if res_image is None:
res_image = numpy.zeros(image.shape)
if counter is None:
counter = itertools.count(15)
pos, axis, (p1, p2) = subdiv
if axis == 0:
s1, s2 = res_image[pos:], res_image[:pos]
i1, i2 = image[pos:], image[:pos]
else:
s1, s2 = res_image[:, pos:], res_image[:, :pos]
i1, i2 = image[:, pos:], image[:, :pos]
if p1 is None:
if numpy.count_nonzero(i1):
s1[:] = next(counter)
else:
draw_binary_partition(i1, p1, s1, counter)
if p2 is None:
if numpy.count_nonzero(i2):
s2[:] = next(counter)
else:
draw_binary_partition(i2, p2, s2, counter)
return res_image
def rescale2avg(image):
    """Downscale ``image`` by 2 by averaging two diagonal pixels per block.

    :param image: 2d :class:`numpy.ndarray`
    :rtype: uint8 :class:`numpy.ndarray`, roughly half size per dimension
    """
    sub1 = image[:-1:2, :-1:2]
    sub2 = image[1::2, 1::2]
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    # BUG FIX: floor division; `res /= 2` is true division (this module does
    # `from __future__ import division`) and raises TypeError on an integer
    # numpy array because the float result cannot be cast back to uint32.
    res //= 2
    return res.astype(numpy.uint8)
def rescale2max(image):
sub1 = image[:-1:2, :-1:2]
sub2 = image[1::2, 1::2]
if len(image.shape) == 3:
max_map = grey_scale(sub1) > grey_scale(sub2)
else:
max_map = sub1 > sub2
inv_max_map = max_map == False
res = numpy.zeros(sub1.shape, numpy.uint8)
res[max_map] = sub1[max_map]
res[inv_max_map] = sub2[inv_max_map]
return res
def rescale3avg(image):
    """Downscale ``image`` by 3 by averaging three diagonal pixels per block.

    :param image: 2d :class:`numpy.ndarray`
    :rtype: uint8 :class:`numpy.ndarray`, roughly one third size per dimension
    """
    sub1 = image[:-2:3, :-2:3]
    sub2 = image[1:-1:3, 1:-1:3]
    sub3 = image[2::3, 2::3]
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    res += sub3
    # BUG FIX: floor division; `res /= 3` is true division under
    # `from __future__ import division` and raises TypeError on an integer
    # numpy array (float output cannot be cast back to uint32 in place).
    res //= 3
    return res.astype(numpy.uint8)
def rescale3max(image):
sub1 = image[:-2:3, :-2:3]
sub2 = image[1:-1:3, 1:-1:3]
sub3 = image[2::3, 2::3]
if len(image.shape) == 3:
grey1 = grey_scale(sub1)
grey2 = grey_scale(sub2)
grey3 = grey_scale(sub3)
else:
grey1, grey2, grey3 = sub1, sub2, sub3
max_map_1 = (grey1 > grey2) & (grey1 > grey3)
max_map_2 = (grey2 > grey1) & (grey2 > grey3)
max_map_3 = (max_map_1 | max_map_2) == False
res = numpy.zeros(sub1.shape, numpy.uint8)
res[max_map_1] = sub1[max_map_1]
res[max_map_2] = sub2[max_map_2]
res[max_map_3] = sub3[max_map_3]
return res
def or_reduce_rescale3max_offset(image):
return reduce(operator.or_,
(rescale3max(image[y1:y2, x1:x2]) for (y1, y2), (x1, x2)
in itertools.product(
*[[(i, -(3 - i)) for i in range(3)]] * 2)
)
)
def numpy_or_all(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result |= image
return result
def grey_scale(image):
"""Converts RGB image to Greyscale
:param image: input image
:type image: 3 channel 3d :class:`numpy.ndarray`
:rtype: :class:`numpy.ndarray`
"""
return image.astype(numpy.int32).sum(axis=2) // 3
def find_edges(image):
base = image[:, :, numpy.newaxis].astype(numpy.int16)
c = base[1:-1, 1:-1]
e = base[1:-1, :-2]
w = base[1:-1, 2:]
s = base[:-2, 1:-1]
n = base[2:, 1:-1]
diffs = numpy.concatenate([c-e, c-w, c-s, c-n], axis=2).max(axis=2)
diffs[diffs < 0] = 0
dh, dw = diffs.shape
col = numpy.zeros((dh, 1), numpy.uint8)
row = numpy.zeros((1, dw+2), numpy.uint8)
return numpy.vstack(
(row, numpy.hstack((col, diffs.astype(numpy.uint8), col)), row)
)
def find_threshold_near_density(img, density, low=0, high=255):
"""Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param migh: max threshold to test
:type low: ubyte
:rtype: ubyte
"""
size = numpy.size(img)
densities = []
last_t = None
while True:
t = ((high - low) // 2) + low
if t == last_t:
densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
return densities[0][1]
else:
last_t = t
d = numpy.count_nonzero(img > t) / size
densities.append((d, t))
if d < density:
high = t
elif d >= density: # search away from low
low = t
def filter_greys_using_image(image, target):
"""Filter out any values in target not in image
:param image: image containing values to appear in filtered image
:param target: the image to filter
:rtype: 2d :class:`numpy.ndarray` containing only value in image
and with the same dimensions as target
"""
maskbase = numpy.array(range(256), dtype=numpy.uint8)
mask = numpy.where(numpy.in1d(maskbase, numpy.unique(image)), maskbase, 0)
return mask[target]
def correlation_coefficient_normed(template, image):
    """Normalised cross-correlation of ``template`` at every image position.

    Returns an (H, W) float64 array where each cell is the correlation
    coefficient of the template with the image region anchored at that
    (y, x); positions whose region is constant yield nan (0/0).

    Note: O(H * W * template.size) pure-Python loop - small inputs only.
    """
    h, w = template.shape[:2]
    H, W = image.shape[:2]
    template_size = template.size
    # Template with its mean removed (mean uses the full template size)
    template_distance = template - (template.sum() / template_size)
    corr_num = numpy.zeros((H, W), numpy.float64)
    corr_denum = numpy.zeros((H, W), numpy.float64)
    # BUG FIX: `xrange` and the bare `print y` statement are Python-2-only;
    # use range() and drop the leftover per-row debug print.
    for y in range(H):
        for x in range(W):
            image_in_template_area = image[y:y + h, x:x + w]
            image_distance_of_template_area = (
                image_in_template_area - (
                    image_in_template_area.sum() /
                    template_size
                )
            )
            # Near the borders the region is smaller than the template;
            # trim the template term to match.
            I_H, I_W = image_distance_of_template_area.shape[:2]
            sum_of_template_by_image_distance_at_xy = (
                image_distance_of_template_area * template_distance[:I_H, :I_W]
            ).sum()
            corr_num[y, x] = sum_of_template_by_image_distance_at_xy
            corr_denum[y, x] = numpy.sqrt(
                (template_distance ** 2).sum() *
                (image_distance_of_template_area ** 2).sum()
            )
    return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/vision.py
|
find_threshold_near_density
|
python
|
def find_threshold_near_density(img, density, low=0, high=255):
"""Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param migh: max threshold to test
:type low: ubyte
:rtype: ubyte
"""
size = numpy.size(img)
densities = []
last_t = None
while True:
t = ((high - low) // 2) + low
if t == last_t:
densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
return densities[0][1]
else:
last_t = t
d = numpy.count_nonzero(img > t) / size
densities.append((d, t))
if d < density:
high = t
elif d >= density: # search away from low
low = t
|
Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param high: max threshold to test
:type high: ubyte
:rtype: ubyte
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L450-L485
| null |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
    """Return the view of ``image`` covered by ``rect`` = (x, y, w, h)."""
    left, top, width, height = rect
    return image[top:top + height, left:left + width]
def pad_bin_image_to_shape(image, shape):
    """Pad a binary image on the right/bottom with zeros (False) up to
    ``shape`` = (height, width). The image must not exceed ``shape``.
    """
    target_h, target_w = shape
    img_h, img_w = image.shape
    assert img_h <= target_h
    assert img_w <= target_w
    padded = image
    if img_w < target_w:
        # Extra all-False columns on the right
        padded = numpy.hstack(
            (padded, numpy.zeros((img_h, target_w - img_w), bool)))
    if img_h < target_h:
        # Extra all-False rows at the bottom
        padded = numpy.vstack(
            (padded, numpy.zeros((target_h - img_h, target_w), bool)))
    return padded
def sum_2d_images(images):
    """Element-wise sum of an iterable of same-shaped arrays.

    Accumulates in place into a copy of the first array, so the result
    keeps the first array's dtype and the inputs are left untouched.
    """
    iterator = iter(images)
    total = numpy.copy(next(iterator))
    for array in iterator:
        total += array
    return total
OVERLAP_TABLE = {
16: (16, 8, 4, 2, 1),
15: (15, 5, 3, 1),
12: (12, 6, 4, 3, 2, 1),
10: (10, 5, 2, 1),
9: (9, 3, 1),
8: (8, 4, 2, 1),
6: (6, 3, 2, 1),
5: (5, 1),
4: (4, 2, 1),
3: (3, 1),
2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
                     tollerance=0.5, overlap_table=OVERLAP_TABLE):
    """
    Selects and applies the best convolution method to find template in image.

    Returns a list of matches in (width, height, x offset, y offset)
    format (where the x and y offsets are from the top left corner).

    As the images are binary images, we can utilise the extra bit space in the
    float64's by cutting the image into tiles and stacking them into variable
    grayscale values.

    This allows converting a sparse binary image into a dense(r) grayscale one.
    """
    template_sum = numpy.count_nonzero(bin_template)
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    if template_sum == 0 or th == 0 or tw == 0:
        # Empty template: nothing can match
        return []
    if th > ih or tw > iw:
        # Template larger than the image: nothing can match
        return []
    # How many cells can we split the image into?
    max_vert_cells = ih // th
    # BUG FIX: the horizontal cell count must use the template *width*
    # (was `iw // th`, mixing in the template height).
    max_hor_cells = iw // tw
    # How many times can we stack the image without losing float64
    # precision?  (.items() keeps this working on Python 2 and 3.)
    usable_factors = {n: factors for n, factors in overlap_table.items()
                      if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
    overlap_options = [(factor, n // factor)
                       for n, factors in usable_factors.items()
                       for factor in factors
                       if (factor <= max_vert_cells and
                           n // factor <= max_hor_cells)]
    if not overlap_options:
        # We can't stack the image
        return convolution(bin_template, bin_image, tollerance=tollerance)
    # Pick the split minimising the padded stacked-image area
    best_overlap = min(overlap_options,
                       key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
    return overlapped_convolution(bin_template, bin_image,
                                  tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
    """Locate ``bin_template`` inside ``bin_image`` via FFT cross-correlation.

    Returns a list of (x, y) top-left offsets where the count of overlapping
    True pixels is within ``tollerance`` of the template's True-pixel count.
    """
    expected = numpy.count_nonzero(bin_template)
    ih, iw = bin_image.shape
    th, tw = bin_template.shape
    # Pad image to even dimensions (the rfft2 stage needs an even size)
    if ih % 2 or iw % 2:
        if ih % 2:
            ih += 1
        if iw % 2:
            iw += 1
        bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
    if expected == 0:
        # Empty template matches nothing
        return []
    # Calculate the convolution of the FFT's of the image & template;
    # reversing the template in both axes turns the convolution into a
    # cross-correlation.
    convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
                                                 bin_image.shape)
    # Reverse the FFT to find the result image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    # The areas in the result image within expected +- tollerance are where we
    # saw matches
    found_bitmap = ((convolution_image > (expected - tollerance)) &
                    (convolution_image < (expected + tollerance)))
    match_points = numpy.transpose(numpy.nonzero(found_bitmap))  # bottom right
    # Find the top left point from the template (remember match_point is
    # inside the template, hence the -1)
    return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
def overlapped_convolution(bin_template, bin_image,
                           tollerance=0.5, splits=(4, 2)):
    """
    As each of these images hold only binary values, and RFFT2 works on
    float64 greyscale values, we can make the convolution more efficient by
    breaking the image up into :splits: sections. Each one of these sections
    then has its greyscale value adjusted and then stacked.

    We then apply the convolution to this 'stack' of images, and adjust the
    resultant position matches.
    """
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    hs, ws = splits
    # Base tile size of each section (before the overlap margin)
    h = ih // hs
    w = iw // ws
    count = numpy.count_nonzero(bin_template)
    assert count > 0
    assert h >= th
    assert w >= tw
    # Each section overlaps its neighbour by (template size - 1) so matches
    # straddling a tile boundary are not lost.
    yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
    xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
    # image_stacks is Origin (x,y), array, z (height in stack); each section
    # is weighted by (count + 1) ** num so the per-section contributions can
    # be separated again after the convolution.
    image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
                    for num, (x1, x2, y1, y2) in
                    enumerate((x1, x2, y1, y2) for (x1, x2)
                              in xoffset for (y1, y2) in yoffset)]
    pad_h = max(i.shape[0] for _, i, _ in image_stacks)
    pad_w = max(i.shape[1] for _, i, _ in image_stacks)
    # rfft metrics must be an even size - why ... maths?
    pad_w += pad_w % 2
    pad_h += pad_h % 2
    # Weighted sum of all sections padded to a common size
    overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
                                     * num for _, i, num in image_stacks)
    # Calculate the convolution of the FFT's of the overlapped image & template
    convolution_freqs = (rfft2(overlapped_image) *
                         rfft2(bin_template[::-1, ::-1],
                               overlapped_image.shape))
    # Reverse the FFT to find the result overlapped image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    results = set()
    # Peel sections off the stack from the highest weight down; the modulo
    # at the end removes each processed section's contribution.
    for (x, y), _, num in image_stacks[::-1]:
        test = convolution_image / num
        filtered = ((test >= (count - tollerance)) &
                    (test <= (count + tollerance)))
        match_points = numpy.transpose(numpy.nonzero(filtered))  # bottom right
        for (fy, fx) in match_points:
            if fx < (tw - 1) or fy < (th - 1):
                # Inside the overlap margin - not a real match position
                continue
            results.add((x + fx - (tw - 1), y + fy - (th - 1)))
        convolution_image %= num
    return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
                                     tollerance=0.5, rescale=10):
    """Build a rescaled 'areas of interest' map of candidate match regions.

    NOTE(review): this function looks unfinished - ``result`` is never
    populated before being returned (always []), and it calls
    ``convolution_r3m_targets`` which is not defined in this module.
    Confirm against upstream before relying on it.
    """
    result = []
    h, w = bin_template.shape[:2]
    # Candidate template-sized rectangles
    # (convolution_r3m_targets is presumably an external/removed helper)
    rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
        bin_template, bin_image, tollerance))
    ih, iw = bin_image.shape[:2]
    # areas of interest, downscaled by ``rescale`` in each dimension
    aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
    for x, y, w, h in rects:
        aoi[y // rescale:(y + h) // rescale + 1,
            x // rescale:(x + w) // rescale + 1] = True
    # Partition the interest map, then prune empty partitions
    bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
    if bp:
        bp = prune_unbeneficial_partitions(aoi, bp)
    return result
def binary_partition_to_rects(bp, image, template_w, template_h,
                              xoffset=0, yoffset=0):
    """Convert a binary partition tree into (x, y, w, h) rectangles.

    NOTE(review): this function appears truncated/unfinished - it computes
    child regions and offsets but never recurses or returns in the non-leaf
    case, and ``image.shape[2:]`` looks like it should be ``image.shape[:2]``
    for an (h, w) unpack. Confirm against upstream.
    """
    h, w = image.shape[2:]
    if bp is None:
        # Leaf: the whole region is one rectangle
        return [(xoffset, yoffset, w, h)]
    pos, axis, (p1, p2) = bp
    if axis == 0:
        # Horizontal cut at row ``pos``
        new_xoffset, new_yoffset = xoffset, yoffset + pos
        i1, i2 = image[pos:], image[:pos]
    else:
        # Vertical cut at column ``pos``
        new_xoffset, new_yoffset = xoffset + pos, yoffset
        i1, i2 = image[:, pos:], image[:, :pos]
def prune_unbeneficial_partitions(image, bp):
    """Prune subtrees of a binary partition that isolate only empty regions.

    A leaf side counts as beneficial when its image region is entirely zero.
    Returns a pruned [pos, axis, [child, child]] node (children collapsed to
    None when their result was a plain boolean), or None when neither side
    was beneficial.
    """
    pos, axis, (left, right) = bp
    if axis == 0:
        region_a, region_b = image[pos:], image[:pos]
    else:
        region_a, region_b = image[:, pos:], image[:, :pos]

    def _prune_side(region, child):
        # Leaf: beneficial only when the region holds no True pixels.
        if child is None:
            return numpy.count_nonzero(region) == 0
        return prune_unbeneficial_partitions(region, child)

    res_a = _prune_side(region_a, left)
    res_b = _prune_side(region_b, right)
    if not (res_a or res_b):
        return None
    return [pos,
            axis,
            [None if res_a in [True, False] else res_a,
             None if res_b in [True, False] else res_b]]
def get_partition_scores(image, min_w=1, min_h=1):
    """Return list of best to worst binary splits along the x and y axis.

    Each entry is (negated score, split position, axis) where axis 0 is a
    horizontal cut and axis 1 a vertical cut; the score is the absolute
    difference in True-pixel density between the two halves, so sorting
    puts the most separating split first. Returns [] for empty, all-zero
    or all-one images, or when no split can honour min_w/min_h.
    """
    h, w = image.shape[:2]
    if w == 0 or h == 0:
        return []
    area = h * w
    cnz = numpy.count_nonzero
    total = cnz(image)
    if total == 0 or area == total:
        # Uniform image: no split can separate anything
        return []
    if h < min_h * 2:
        # Too short to leave min_h rows on both sides of a cut
        y_c = []
    else:
        # Density difference between the two halves for each candidate y cut
        y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))),
                y, 0)
               for count, y in ((cnz(image[y:]), y)
                                for y in range(min_h, image.shape[0] - min_h))]
    if w < min_w * 2:
        # Too narrow to leave min_w columns on both sides of a cut
        x_c = []
    else:
        x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))),
                x, 1)
               for count, x in ((cnz(image[:, x:]), x)
                                for x in range(min_w, image.shape[1] - min_w))]
    # Most negative (best separating) first
    return sorted(x_c + y_c)
def get_best_partition(image, min_w=1, min_h=1):
    """Return (pos, axis) of the highest-scoring split, or None if the
    image cannot be usefully split."""
    ranked = get_partition_scores(image, min_w=min_w, min_h=min_h)
    return ranked[0][-2:] if ranked else None
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
    """Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf
    nodes == None.

    If max_depth < 0 this function will continue until all leaf nodes have
    been found, if it is >= 0 leaf nodes will be created at that depth.

    min_w and min_h are the minimum width or height of a partition.
    """
    if max_depth >= 0 and depth >= max_depth:
        # Depth limit reached: force a leaf
        return None
    partition = get_best_partition(image, min_w=min_w, min_h=min_h)
    if partition is None:
        # No useful split exists: leaf
        return None
    pos, axis = partition
    # Recurse into both halves of the chosen split
    # (axis is 0 or 1 as produced by get_partition_scores)
    if axis == 0:
        p1 = binary_partition_image(
            image[pos:], min_w, min_h, depth + 1, max_depth)
        p2 = binary_partition_image(
            image[:pos], min_w, min_h, depth + 1, max_depth)
    elif axis == 1:
        p1 = binary_partition_image(
            image[:, pos:], min_w, min_h, depth + 1, max_depth)
        p2 = binary_partition_image(
            image[:, :pos], min_w, min_h, depth + 1, max_depth)
    return [pos, axis, [p1, p2]]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
    """Render a binary partition tree as distinct grey levels (for debugging).

    Each leaf region containing any True pixels is filled with the next
    value from ``counter`` (defaults to 15, 16, ...); ``res_image`` is
    mutated in place and returned.
    """
    if res_image is None:
        res_image = numpy.zeros(image.shape)
    if counter is None:
        counter = itertools.count(15)
    pos, axis, (p1, p2) = subdiv
    if axis == 0:
        # Horizontal cut: split rows of both the source and output views
        s1, s2 = res_image[pos:], res_image[:pos]
        i1, i2 = image[pos:], image[:pos]
    else:
        # Vertical cut: split columns
        s1, s2 = res_image[:, pos:], res_image[:, :pos]
        i1, i2 = image[:, pos:], image[:, :pos]
    if p1 is None:
        # Leaf: fill with a fresh grey level when the region has content
        if numpy.count_nonzero(i1):
            s1[:] = next(counter)
    else:
        draw_binary_partition(i1, p1, s1, counter)
    if p2 is None:
        if numpy.count_nonzero(i2):
            s2[:] = next(counter)
    else:
        draw_binary_partition(i2, p2, s2, counter)
    return res_image
def rescale2avg(image):
    """Downscale ``image`` by 2 by averaging two diagonal pixels per block.

    :param image: 2d :class:`numpy.ndarray`
    :rtype: uint8 :class:`numpy.ndarray`, roughly half size per dimension
    """
    sub1 = image[:-1:2, :-1:2]
    sub2 = image[1::2, 1::2]
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    # BUG FIX: floor division; `res /= 2` is true division (this module does
    # `from __future__ import division`) and raises TypeError on an integer
    # numpy array because the float result cannot be cast back to uint32.
    res //= 2
    return res.astype(numpy.uint8)
def rescale2max(image):
    """Downscale by 2, keeping the brighter of two diagonal pixels per block.

    Colour (3d) inputs are compared by greyscale intensity; 2d inputs by
    raw value. Returns a uint8 array.
    """
    sub1 = image[:-1:2, :-1:2]
    sub2 = image[1::2, 1::2]
    if len(image.shape) == 3:
        brighter = grey_scale(sub1) > grey_scale(sub2)
    else:
        brighter = sub1 > sub2
    not_brighter = numpy.logical_not(brighter)
    result = numpy.zeros(sub1.shape, numpy.uint8)
    result[brighter] = sub1[brighter]
    result[not_brighter] = sub2[not_brighter]
    return result
def rescale3avg(image):
    """Downscale ``image`` by 3 by averaging three diagonal pixels per block.

    :param image: 2d :class:`numpy.ndarray`
    :rtype: uint8 :class:`numpy.ndarray`, roughly one third size per dimension
    """
    sub1 = image[:-2:3, :-2:3]
    sub2 = image[1:-1:3, 1:-1:3]
    sub3 = image[2::3, 2::3]
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    res += sub3
    # BUG FIX: floor division; `res /= 3` is true division under
    # `from __future__ import division` and raises TypeError on an integer
    # numpy array (float output cannot be cast back to uint32 in place).
    res //= 3
    return res.astype(numpy.uint8)
def rescale3max(image):
    """Downscale by 3, keeping the brightest of three diagonal pixels
    per block (greyscale intensity for colour inputs, raw value for 2d).
    """
    sub1 = image[:-2:3, :-2:3]
    sub2 = image[1:-1:3, 1:-1:3]
    sub3 = image[2::3, 2::3]
    if len(image.shape) == 3:
        keys = [grey_scale(s) for s in (sub1, sub2, sub3)]
    else:
        keys = [sub1, sub2, sub3]
    g1, g2, g3 = keys
    pick1 = (g1 > g2) & (g1 > g3)
    pick2 = (g2 > g1) & (g2 > g3)
    # Everything not strictly won by sub1 or sub2 falls through to sub3
    pick3 = numpy.logical_not(pick1 | pick2)
    out = numpy.zeros(sub1.shape, numpy.uint8)
    out[pick1] = sub1[pick1]
    out[pick2] = sub2[pick2]
    out[pick3] = sub3[pick3]
    return out
def or_reduce_rescale3max_offset(image):
    """OR together rescale3max results for every 3x3 crop offset of ``image``.

    Evaluating all (row, col) offsets in {0, 1, 2}^2 makes the downscale
    tolerant of sub-3-pixel translations.
    """
    # BUG FIX: the builtin `reduce` is Python-2-only; functools.reduce
    # exists on Python 2.6+ and 3, keeping behaviour identical.
    from functools import reduce
    offsets = [(i, -(3 - i)) for i in range(3)]
    return reduce(operator.or_,
                  (rescale3max(image[y1:y2, x1:x2])
                   for (y1, y2), (x1, x2)
                   in itertools.product(offsets, offsets)))
def numpy_or_all(images):
    """Bitwise-OR an iterable of arrays together.

    Accumulates in place into a copy of the first array, leaving the
    inputs untouched.
    """
    iterator = iter(images)
    accumulated = numpy.copy(next(iterator))
    for array in iterator:
        accumulated |= array
    return accumulated
def grey_scale(image):
    """Converts RGB image to Greyscale by integer-averaging the channels.

    :param image: input image
    :type image: 3 channel 3d :class:`numpy.ndarray`
    :rtype: 2d :class:`numpy.ndarray` of int32
    """
    channel_sum = image.astype(numpy.int32).sum(axis=2)
    return channel_sum // 3
def find_edges(image):
    """Simple edge detector: per-pixel maximum positive difference to the
    four 4-connected neighbours, zero-padded back to the input size.

    :param image: 2d greyscale :class:`numpy.ndarray`
    :rtype: 2d uint8 :class:`numpy.ndarray`, same shape as ``image``
    """
    # int16 working copy with a trailing axis so the four diffs can be
    # concatenated along it
    base = image[:, :, numpy.newaxis].astype(numpy.int16)
    c = base[1:-1, 1:-1]
    e = base[1:-1, :-2]
    w = base[1:-1, 2:]
    s = base[:-2, 1:-1]
    n = base[2:, 1:-1]
    # Largest brightness increase relative to any neighbour
    diffs = numpy.concatenate([c-e, c-w, c-s, c-n], axis=2).max(axis=2)
    diffs[diffs < 0] = 0
    dh, dw = diffs.shape
    # Restore the one-pixel border lost to the neighbour slicing with zeros
    col = numpy.zeros((dh, 1), numpy.uint8)
    row = numpy.zeros((1, dw+2), numpy.uint8)
    return numpy.vstack(
        (row, numpy.hstack((col, diffs.astype(numpy.uint8), col)), row)
    )
def find_threshold_near_density(img, density, low=0, high=255):
    """Find a threshold where the fraction of pixels above the threshold
    is closest to density where density is (count of pixels above
    threshold / count of pixels).

    The highest threshold closest to the desired density will be returned.

    Use low and high to exclude undesirable thresholds.

    :param img: target image
    :type img: 2d :class:`numpy.ndarray`
    :param density: target density
    :type density: float between 0.0 and 1.0
    :param low: min threshold to test
    :type low: ubyte
    :param high: max threshold to test
    :type high: ubyte
    :rtype: ubyte
    """
    size = numpy.size(img)
    densities = []
    last_t = None
    # Binary search over thresholds; stop once the midpoint stops moving.
    while True:
        t = ((high - low) // 2) + low
        if t == last_t:
            # Ties on density difference are broken in favour of the
            # highest threshold (256 - t sorts larger t first).
            densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
            return densities[0][1]
        last_t = t
        d = numpy.count_nonzero(img > t) / size
        densities.append((d, t))
        if d < density:
            high = t
        else:  # d >= density: search away from low
            low = t
def filter_greys_using_image(image, target):
    """Zero out any grey value in ``target`` that never occurs in ``image``.

    :param image: image containing values to appear in filtered image
    :param target: the image to filter
    :rtype: 2d :class:`numpy.ndarray` containing only values in image
            and with the same dimensions as target
    """
    # Build a 256-entry lookup table: identity for values present in
    # ``image``, zero for everything else.
    all_greys = numpy.arange(256, dtype=numpy.uint8)
    keep = numpy.in1d(all_greys, numpy.unique(image))
    lookup = numpy.where(keep, all_greys, 0)
    return lookup[target]
def correlation_coefficient_normed(template, image):
    """Normalised cross-correlation of ``template`` at every image position.

    Returns an (H, W) float64 array where each cell is the correlation
    coefficient of the template with the image region anchored at that
    (y, x); positions whose region is constant yield nan (0/0).

    Note: O(H * W * template.size) pure-Python loop - small inputs only.
    """
    h, w = template.shape[:2]
    H, W = image.shape[:2]
    template_size = template.size
    # Template with its mean removed (mean uses the full template size)
    template_distance = template - (template.sum() / template_size)
    corr_num = numpy.zeros((H, W), numpy.float64)
    corr_denum = numpy.zeros((H, W), numpy.float64)
    # BUG FIX: `xrange` and the bare `print y` statement are Python-2-only;
    # use range() and drop the leftover per-row debug print.
    for y in range(H):
        for x in range(W):
            image_in_template_area = image[y:y + h, x:x + w]
            image_distance_of_template_area = (
                image_in_template_area - (
                    image_in_template_area.sum() /
                    template_size
                )
            )
            # Near the borders the region is smaller than the template;
            # trim the template term to match.
            I_H, I_W = image_distance_of_template_area.shape[:2]
            sum_of_template_by_image_distance_at_xy = (
                image_distance_of_template_area * template_distance[:I_H, :I_W]
            ).sum()
            corr_num[y, x] = sum_of_template_by_image_distance_at_xy
            corr_denum[y, x] = numpy.sqrt(
                (template_distance ** 2).sum() *
                (image_distance_of_template_area ** 2).sum()
            )
    return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/vision.py
|
filter_greys_using_image
|
python
|
def filter_greys_using_image(image, target):
"""Filter out any values in target not in image
:param image: image containing values to appear in filtered image
:param target: the image to filter
:rtype: 2d :class:`numpy.ndarray` containing only value in image
and with the same dimensions as target
"""
maskbase = numpy.array(range(256), dtype=numpy.uint8)
mask = numpy.where(numpy.in1d(maskbase, numpy.unique(image)), maskbase, 0)
return mask[target]
|
Filter out any values in target not in image
:param image: image containing values to appear in filtered image
:param target: the image to filter
:rtype: 2d :class:`numpy.ndarray` containing only value in image
and with the same dimensions as target
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/vision.py#L488-L499
| null |
from __future__ import division
from numpy.fft import irfft2, rfft2
import numpy
import itertools
import operator
def subimage(rect, image):
x, y, w, h = rect
return image[y:y + h, x:x + w]
def pad_bin_image_to_shape(image, shape):
"""
Padd image to size :shape: with zeros
"""
h, w = shape
ih, iw = image.shape
assert ih <= h
assert iw <= w
if iw < w:
result = numpy.hstack((image, numpy.zeros((ih, w - iw), bool)))
else:
result = image
if ih < h:
result = numpy.vstack((result, numpy.zeros((h - ih, w), bool)))
return result
def sum_2d_images(images):
it = iter(images)
result = numpy.copy(next(it))
for image in it:
result += image
return result
OVERLAP_TABLE = {
16: (16, 8, 4, 2, 1),
15: (15, 5, 3, 1),
12: (12, 6, 4, 3, 2, 1),
10: (10, 5, 2, 1),
9: (9, 3, 1),
8: (8, 4, 2, 1),
6: (6, 3, 2, 1),
5: (5, 1),
4: (4, 2, 1),
3: (3, 1),
2: (2, 1)
}
# A number near float max which we don't want to get near to keep precision
# If you get false matches consider reducing this number.
ACCURACY_LIMIT = 2 ** (64 - 23)
def best_convolution(bin_template, bin_image,
                     tollerance=0.5, overlap_table=OVERLAP_TABLE):
    """
    Selects and applies the best convolution method to find template in image.

    Returns a list of matches in (width, height, x offset, y offset)
    format (where the x and y offsets are from the top left corner).

    As the images are binary images, we can utilise the extra bit space in the
    float64's by cutting the image into tiles and stacking them into variable
    grayscale values.

    This allows converting a sparse binary image into a dense(r) grayscale one.
    """
    template_sum = numpy.count_nonzero(bin_template)
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    if template_sum == 0 or th == 0 or tw == 0:
        # Empty template: nothing can match
        return []
    if th > ih or tw > iw:
        # Template larger than the image: nothing can match
        return []
    # How many cells can we split the image into?
    max_vert_cells = ih // th
    # BUG FIX: the horizontal cell count must use the template *width*
    # (was `iw // th`, mixing in the template height).
    max_hor_cells = iw // tw
    # How many times can we stack the image without losing float64
    # precision?  (.items() keeps this working on Python 2 and 3.)
    usable_factors = {n: factors for n, factors in overlap_table.items()
                      if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
    overlap_options = [(factor, n // factor)
                       for n, factors in usable_factors.items()
                       for factor in factors
                       if (factor <= max_vert_cells and
                           n // factor <= max_hor_cells)]
    if not overlap_options:
        # We can't stack the image
        return convolution(bin_template, bin_image, tollerance=tollerance)
    # Pick the split minimising the padded stacked-image area
    best_overlap = min(overlap_options,
                       key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
    return overlapped_convolution(bin_template, bin_image,
                                  tollerance=tollerance, splits=best_overlap)
def convolution(bin_template, bin_image, tollerance=0.5):
expected = numpy.count_nonzero(bin_template)
ih, iw = bin_image.shape
th, tw = bin_template.shape
# Padd image to even dimensions
if ih % 2 or iw % 2:
if ih % 2:
ih += 1
if iw % 2:
iw += 1
bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
if expected == 0:
return []
# Calculate the convolution of the FFT's of the image & template
convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
bin_image.shape)
# Reverse the FFT to find the result image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
# The areas in the result image within expected +- tollerance are where we
# saw matches
found_bitmap = ((convolution_image > (expected - tollerance)) &
(convolution_image < (expected + tollerance)))
match_points = numpy.transpose(numpy.nonzero(found_bitmap)) # bottom right
# Find the top left point from the template (remember match_point is
# inside the template (hence -1)
return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
def overlapped_convolution(bin_template, bin_image,
                           tollerance=0.5, splits=(4, 2)):
    """Convolution-based binary template match over a sectioned image.

    As these images hold only binary values, and RFFT2 works on float64
    greyscale values, the convolution is made more efficient by breaking the
    image into ``splits`` sections. Each section is scaled to its own "grey
    level" (a distinct power of count + 1) and the sections are summed into a
    single stacked image; one convolution is then applied to the stack and the
    per-section matches are decoded back out.

    :param bin_template: 2d binary array to search for
    :param bin_image: 2d binary array to search in
    :param tollerance: allowed deviation from a perfect overlap count
    :param splits: (vertical, horizontal) number of sections
    :returns: list of (x, y) top-left match positions
    """
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    hs, ws = splits
    h = ih // hs
    w = iw // ws
    count = numpy.count_nonzero(bin_template)
    assert count > 0
    assert h >= th
    assert w >= tw
    # Sections overlap their neighbour by template-size - 1 so a match that
    # straddles a section boundary is still fully seen by one section.
    yoffset = [(i * h, ((i + 1) * h) + (th - 1)) for i in range(hs)]
    xoffset = [(i * w, ((i + 1) * w) + (tw - 1)) for i in range(ws)]
    # image_stacks is Origin (x,y), array, z (height in stack); the z value
    # (count + 1) ** num gives each section a non-interfering "digit" in a
    # base-(count + 1) representation of the summed image.
    image_stacks = [((x1, y1), bin_image[y1:y2, x1:x2], float((count + 1) ** (num)))
                    for num, (x1, x2, y1, y2) in
                    enumerate((x1, x2, y1, y2) for (x1, x2)
                              in xoffset for (y1, y2) in yoffset)]
    pad_h = max(i.shape[0] for _, i, _ in image_stacks)
    pad_w = max(i.shape[1] for _, i, _ in image_stacks)
    # rfft metrics must be an even size - why ... maths?
    pad_w += pad_w % 2
    pad_h += pad_h % 2
    # Sum the scaled sections into one image; pad_bin_image_to_shape and
    # sum_2d_images are helpers defined elsewhere in this module.
    overlapped_image = sum_2d_images(pad_bin_image_to_shape(i, (pad_h, pad_w))
                                     * num for _, i, num in image_stacks)
    #print "Overlap splits %r, Image Size (%d,%d),
    #Overlapped Size (%d,%d)" % (splits,iw,ih,pad_w,pad_h)
    # Calculate the convolution of the FFT's of the overlapped image & template
    convolution_freqs = (rfft2(overlapped_image) *
                         rfft2(bin_template[::-1, ::-1],
                               overlapped_image.shape))
    # Reverse the FFT to find the result overlapped image
    convolution_image = irfft2(convolution_freqs)
    # At this point, the maximum point in convolution_image should be the
    # bottom right (why?) of the area of greatest match
    results = set()
    # Decode sections from the highest "digit" downwards; the final modulo
    # strips this section's contribution before decoding the next one.
    for (x, y), _, num in image_stacks[::-1]:
        test = convolution_image / num
        filtered = ((test >= (count - tollerance)) &
                    (test <= (count + tollerance)))
        match_points = numpy.transpose(numpy.nonzero(filtered))  # bottom right
        for (fy, fx) in match_points:
            # Matches whose bottom-right corner is closer to the section edge
            # than the template size cannot be complete matches.
            if fx < (tw - 1) or fy < (th - 1):
                continue
            # Convert bottom-right coordinates back to top-left, offset by
            # this section's origin within the full image.
            results.add((x + fx - (tw - 1), y + fy - (th - 1)))
        convolution_image %= num
    return list(results)
def get_possible_convolution_regions(bin_template, bin_image,
                                     tollerance=0.5, rescale=10):
    """Find coarse regions of *bin_image* worth matching *bin_template* in.

    Builds a down-scaled "areas of interest" bitmap from convolution match
    candidates, then binary-partitions and prunes it.

    NOTE(review): ``result`` is never populated, so this always returns an
    empty list, and the pruned partition ``bp`` is computed but unused --
    this function appears to be unfinished; confirm before relying on it.
    """
    result = []
    h, w = bin_template.shape[:2]
    rects = set((x, y, w, h) for x, y in convolution_r3m_targets(
        bin_template, bin_image, tollerance))
    ih, iw = bin_image.shape[:2]
    # areas of interest
    aoi = numpy.zeros((ih // rescale + 1, iw // rescale + 1), bool)
    # NOTE(review): the loop targets rebind w and h, shadowing the template
    # size read above -- harmless here only because every rect shares the
    # same (w, h).
    for x, y, w, h in rects:
        aoi[y // rescale:(y + h) // rescale + 1,
            x // rescale:(x + w) // rescale + 1] = True
    bp = binary_partition_image(aoi, min_w=w // rescale, min_h=h // rescale)
    if bp:
        bp = prune_unbeneficial_partitions(aoi, bp)
    return result
def binary_partition_to_rects(bp, image, template_w, template_h,
                              xoffset=0, yoffset=0):
    """Convert a binary-partition node into (x, y, w, h) rectangles.

    NOTE(review): this function looks unfinished -- ``image.shape[2:]``
    presumably should be ``image.shape[:2]`` (TODO confirm), and after
    computing the sub-images i1/i2 it neither recurses nor returns, so the
    non-leaf path falls off the end and yields None.
    """
    h, w = image.shape[2:]
    if bp is None:
        # Leaf node: the whole (sub-)image is a single rectangle.
        return [(xoffset, yoffset, w, h)]
    pos, axis, (p1, p2) = bp
    if axis == 0:
        # Horizontal split: offset moves down by pos.
        new_xoffset, new_yoffset = xoffset, yoffset + pos
        i1, i2 = image[pos:], image[:pos]
    else:
        # Vertical split: offset moves right by pos.
        new_xoffset, new_yoffset = xoffset + pos, yoffset
        i1, i2 = image[:, pos:], image[:, :pos]
def prune_unbeneficial_partitions(image, bp):
    """Prune a binary-partition tree against the pixels of *image*.

    Recursively walks the [pos, axis, [p1, p2]] tree produced by
    binary_partition_image. A leaf (None child) evaluates to True when its
    side of the split contains no set pixels. A node is kept only when at
    least one side is empty or yields a pruned subtree; otherwise the node
    collapses to None.

    :returns: a pruned [pos, axis, [p1, p2]] node, or None.
    """
    pos, axis, (p1, p2) = bp
    if axis == 0:
        i1, i2 = image[pos:], image[:pos]
    else:
        i1, i2 = image[:, pos:], image[:, :pos]
    if p1 is None:
        p1_result = numpy.count_nonzero(i1) == 0
    else:
        p1_result = prune_unbeneficial_partitions(i1, p1)
    if p2 is None:
        p2_result = numpy.count_nonzero(i2) == 0
    else:
        p2_result = prune_unbeneficial_partitions(i2, p2)
    if p1_result or p2_result:
        # Boolean results collapse back to leaf markers (None); recursive
        # results (pruned subtrees or None) are kept as-is.
        return [pos,
                axis,
                [None if p1_result in [True, False] else p1_result,
                 None if p2_result in [True, False] else p2_result]]
    else:
        return None
def get_partition_scores(image, min_w=1, min_h=1):
    """Return list of best to worst binary splits along the x and y axis.

    Each entry is (-score, position, axis), where axis 0 splits rows at
    ``position`` and axis 1 splits columns. The score is the absolute
    difference in nonzero-pixel density between the two halves, so sorting
    ascending places the most density-contrasting split first.
    """
    h, w = image.shape[:2]
    if w == 0 or h == 0:
        return []
    area = h * w
    cnz = numpy.count_nonzero
    total = cnz(image)
    # All-zero or all-set images offer no useful split.
    if total == 0 or area == total:
        return []
    # NOTE(review): the surrounding file uses Python 2 idioms (xrange, print
    # statements); under Python 2 the density ratios below are integer
    # divisions that truncate to 0 or 1 -- confirm float division intended.
    if h < min_h * 2:
        y_c = []
    else:
        y_c = [(-abs((count / ((h - y) * w)) - ((total - count) / (y * w))),
                y, 0)
               for count, y in ((cnz(image[y:]), y)
                                for y in range(min_h, image.shape[0] - min_h))]
    if w < min_w * 2:
        x_c = []
    else:
        x_c = [(-abs((count / (h * (w - x))) - ((total - count) / (h * x))),
                x, 1)
               for count, x in ((cnz(image[:, x:]), x)
                                for x in range(min_w, image.shape[1] - min_w))]
    return sorted(x_c + y_c)
def get_best_partition(image, min_w=1, min_h=1):
    """Return (position, axis) of the best-scoring split of *image*.

    Returns None when get_partition_scores finds no valid split.
    """
    ranked = get_partition_scores(image, min_w=min_w, min_h=min_h)
    if not ranked:
        return None
    best = ranked[0]
    return best[-2:]
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
    """Return a bsp of [pos, axis, [before_node, after_node]] nodes where
    leaf nodes == None.

    If max_depth < 0 the recursion continues until all leaf nodes have been
    found; if it is >= 0 leaf nodes are forced at that depth. min_w and
    min_h are the minimum width/height of a partition.
    """
    # Depth cut-off only applies when a non-negative max_depth was given.
    if 0 <= max_depth <= depth:
        return None
    split = get_best_partition(image, min_w=min_w, min_h=min_h)
    if split is None:
        return None
    pos, axis = split
    if axis == 0:
        halves = (image[pos:], image[:pos])
    elif axis == 1:
        halves = (image[:, pos:], image[:, :pos])
    children = [binary_partition_image(half, min_w, min_h,
                                       depth + 1, max_depth)
                for half in halves]
    return [pos, axis, children]
def draw_binary_partition(image, subdiv, res_image=None, counter=None):
    """Paint each non-empty leaf region of a bsp tree with its own grey
    level (counting up from 15) and return the painted image.
    """
    if res_image is None:
        res_image = numpy.zeros(image.shape)
    if counter is None:
        counter = itertools.count(15)
    pos, axis, children = subdiv
    # Pair up the output slice and source slice for each side of the split,
    # in the same order as the child nodes.
    if axis == 0:
        slice_pairs = ((res_image[pos:], image[pos:]),
                       (res_image[:pos], image[:pos]))
    else:
        slice_pairs = ((res_image[:, pos:], image[:, pos:]),
                       (res_image[:, :pos], image[:, :pos]))
    for child, (target, source) in zip(children, slice_pairs):
        if child is None:
            # Leaf: fill it only when the source region has set pixels.
            if numpy.count_nonzero(source):
                target[:] = next(counter)
        else:
            draw_binary_partition(source, child, target, counter)
    return res_image
def rescale2avg(image):
    """Downscale *image* by a factor of two, averaging diagonal pixel pairs.

    :param image: 2d uint8 array (odd trailing row/column is dropped)
    :rtype: 2d uint8 :class:`numpy.ndarray` half the size of *image*
    """
    sub1 = image[:-1:2, :-1:2]
    sub2 = image[1::2, 1::2]
    # Accumulate in uint32 so adding two uint8 samples cannot overflow.
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    # Use floor division: ``res /= 2`` is in-place *true* division, which
    # raises a TypeError on integer arrays under modern numpy.
    res //= 2
    return res.astype(numpy.uint8)
def rescale2max(image):
    """Downscale *image* by a factor of two, keeping the brighter of each
    diagonal pixel pair (3-channel input is compared via grey_scale).
    """
    top_left = image[:-1:2, :-1:2]
    bottom_right = image[1::2, 1::2]
    if len(image.shape) == 3:
        brighter = grey_scale(top_left) > grey_scale(bottom_right)
    else:
        brighter = top_left > bottom_right
    darker = numpy.logical_not(brighter)
    res = numpy.zeros(top_left.shape, numpy.uint8)
    res[brighter] = top_left[brighter]
    res[darker] = bottom_right[darker]
    return res
def rescale3avg(image):
    """Downscale *image* by a factor of three, averaging the three diagonal
    samples of each 3x3 cell.

    :param image: 2d uint8 array (trailing rows/columns beyond a multiple
        of three are dropped)
    :rtype: 2d uint8 :class:`numpy.ndarray` one third the size of *image*
    """
    sub1 = image[:-2:3, :-2:3]
    sub2 = image[1:-1:3, 1:-1:3]
    sub3 = image[2::3, 2::3]
    # Accumulate in uint32 so summing three uint8 samples cannot overflow.
    res = numpy.zeros(sub1.shape, numpy.uint32)
    res += sub1
    res += sub2
    res += sub3
    # Use floor division: ``res /= 3`` is in-place *true* division, which
    # raises a TypeError on integer arrays under modern numpy.
    res //= 3
    return res.astype(numpy.uint8)
def rescale3max(image):
    """Downscale *image* by a factor of three, keeping the brightest of the
    three diagonal samples in each 3x3 cell (ties fall through to the last
    sample; 3-channel input is compared via grey_scale).
    """
    samples = [image[:-2:3, :-2:3],
               image[1:-1:3, 1:-1:3],
               image[2::3, 2::3]]
    if len(image.shape) == 3:
        greys = [grey_scale(s) for s in samples]
    else:
        greys = samples
    g1, g2, g3 = greys
    pick_first = (g1 > g2) & (g1 > g3)
    pick_second = (g2 > g1) & (g2 > g3)
    pick_third = numpy.logical_not(pick_first | pick_second)
    res = numpy.zeros(samples[0].shape, numpy.uint8)
    for mask, sample in zip((pick_first, pick_second, pick_third), samples):
        res[mask] = sample[mask]
    return res
def or_reduce_rescale3max_offset(image):
    """OR together the 3x downscales of *image* taken at every (y, x) phase
    offset in 0..2, so a feature survives regardless of grid alignment.
    """
    offsets = [(i, -(3 - i)) for i in range(3)]
    combined = None
    for (y1, y2), (x1, x2) in itertools.product(offsets, offsets):
        scaled = rescale3max(image[y1:y2, x1:x2])
        if combined is None:
            combined = scaled
        else:
            combined = operator.or_(combined, scaled)
    return combined
def numpy_or_all(images):
    """Bitwise-OR every array in *images* into a single result.

    The first array is copied before accumulating, so no input is mutated.
    """
    source = iter(images)
    combined = numpy.copy(next(source))
    for current in source:
        combined |= current
    return combined
def grey_scale(image):
    """Converts RGB image to Greyscale

    :param image: input image
    :type image: 3 channel 3d :class:`numpy.ndarray`
    :rtype: 2d :class:`numpy.ndarray` (floor of the per-pixel channel mean)
    """
    # Widen to int32 before summing so three uint8 channels cannot overflow.
    widened = image.astype(numpy.int32)
    channel_total = widened.sum(axis=2)
    return channel_total // 3
def find_edges(image):
    """Edge-detect a 2d greyscale image.

    Each interior pixel becomes the largest positive difference between it
    and its four axis neighbours (negative differences clamp to zero); the
    one-pixel border, which lacks a full neighbourhood, is zero padded.
    """
    # Work in signed int16 so neighbour differences can go negative.
    signed = image[:, :, numpy.newaxis].astype(numpy.int16)
    centre = signed[1:-1, 1:-1]
    neighbours = (signed[1:-1, :-2],
                  signed[1:-1, 2:],
                  signed[:-2, 1:-1],
                  signed[2:, 1:-1])
    stacked = numpy.concatenate([centre - nb for nb in neighbours], axis=2)
    diffs = stacked.max(axis=2)
    diffs[diffs < 0] = 0
    height, width = diffs.shape
    side = numpy.zeros((height, 1), numpy.uint8)
    band = numpy.zeros((1, width + 2), numpy.uint8)
    core = numpy.hstack((side, diffs.astype(numpy.uint8), side))
    return numpy.vstack((band, core, band))
def find_threshold_near_density(img, density, low=0, high=255):
    """Find a threshold where the fraction of pixels above the threshold
    is closest to density where density is (count of pixels above
    threshold / count of pixels).
    The highest threshold closest to the desired density will be returned.
    Use low and high to exclude undesirable thresholds.

    :param img: target image
    :type img: 2d :class:`numpy.ndarray`
    :param density: target density
    :type density: float between 0.0 and 1.0
    :param low: min threshold to test
    :type low: ubyte
    :param high: max threshold to test
    :type high: ubyte
    :rtype: ubyte
    """
    size = numpy.size(img)
    densities = []
    last_t = None
    # Bisect over the threshold range; stop when the midpoint stops moving,
    # then pick the tested threshold whose density is closest to the target
    # (ties broken towards the higher threshold by the ``256 - t`` key).
    while True:
        t = ((high - low) // 2) + low
        if t == last_t:
            densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
            return densities[0][1]
        else:
            last_t = t
        # NOTE(review): under Python 2 (used elsewhere in this file) this is
        # integer division truncating to 0 or 1 -- confirm float intended.
        d = numpy.count_nonzero(img > t) / size
        densities.append((d, t))
        if d < density:
            # Too few pixels above threshold: search the lower half.
            high = t
        elif d >= density:  # search away from low
            low = t
def filter_greys_using_image(image, target):
    """Filter out any grey values in *target* that never occur in *image*.

    :param image: image containing the values allowed to survive
    :param target: the image to filter
    :rtype: 2d :class:`numpy.ndarray` shaped like *target*, where values
        present in *image* pass through unchanged and all others become 0
    """
    all_greys = numpy.arange(256, dtype=numpy.uint8)
    present = numpy.in1d(all_greys, numpy.unique(image))
    # Identity lookup table for surviving values, zero for the rest.
    lookup = numpy.where(present, all_greys, 0)
    return lookup[target]
def correlation_coefficient_normed(template, image):
    """Normalised correlation coefficient of *template* at every position of
    *image*, computed by brute force.

    NOTE(review): Python 2 code (``xrange`` and a bare ``print``).

    :returns: (H, W) float64 array of correlation-coefficient scores
        (numerator / denominator of the coefficient at each position).
    """
    h, w = template.shape[:2]
    H, W = image.shape[:2]
    template_size = template.size
    # Template with its mean removed (mean computed as sum / size).
    template_distance = template - (template.sum() / template_size)
    corr_num = numpy.zeros((H, W), numpy.float64)
    corr_denum = numpy.zeros((H, W), numpy.float64)
    for y in xrange(H):
        for x in xrange(W):
            image_in_template_area = image[y:y + h, x:x + w]
            image_distance_of_template_area = (
                image_in_template_area - (
                    image_in_template_area.sum() /
                    template_size
                )
            )
            # The window is clipped at the right/bottom image edge, so trim
            # the template distances to the window's actual extent.
            I_H, I_W = image_distance_of_template_area.shape[:2]
            sum_of_template_by_image_distance_at_xy = (
                image_distance_of_template_area * template_distance[:I_H, :I_W]
            ).sum()
            corr_num[y, x] = sum_of_template_by_image_distance_at_xy
            corr_denum[y, x] = numpy.sqrt(
                (template_distance ** 2).sum() *
                (image_distance_of_template_area ** 2).sum()
            )
        # NOTE(review): debug progress print left in (Python 2 syntax).
        print y
    return corr_num / corr_denum
|
ten10solutions/Geist
|
geist/match_position_finder_helpers.py
|
find_potential_match_regions
|
python
|
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666):
if method == 'correlation':
match_value = np.sum(template**2) # this will be the value of the match in the
elif method == 'squared difference':
match_value = 0
elif method == 'correlation coefficient':
temp_minus_mean = template - np.mean(template)
match_value = np.sum(temp_minus_mean**2)
else:
raise ValueError('Matching method not implemented')
condition = ((np.round(transformed_array, decimals=3)>=match_value*raw_tolerance) &
(np.round(transformed_array, decimals=3)<=match_value*(1./raw_tolerance)))
return np.transpose(condition.nonzero())
|
To prevent prohibitively slow calculation of normalisation coefficient at each point in image
find potential match points, and normalise only these.
This function uses the definitions of the matching functions to calculate the expected match value
and finds positions in the transformed array matching these- normalisation will then eliminate false positives
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/match_position_finder_helpers.py#L4-L21
| null |
import numpy as np
# transposition and comparison above take most time
# correlation coefficient matches at top left- perfect for tiling
# correlation matches to bottom right- so requires transformation for tiling
def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):
    """Map each candidate match position to the image tile anchored there.

    Keys are the (row, col) of a tile's top-left pixel; values are the
    template-sized slices of *image* starting at that pixel.

    :raises ValueError: for an unrecognised *method*.
    """
    valid_methods = ('correlation', 'correlation coefficient', 'squared difference')
    if method not in valid_methods:
        raise ValueError('Matching method not implemented')
    h, w = template.shape
    candidates = find_potential_match_regions(template, transformed_array,
                                              method=method,
                                              raw_tolerance=raw_tolerance)
    # Each candidate row is a (row, col) index pair; use it both as the dict
    # key and as the tile's top-left corner.
    return {(r, c): image[r:r + h, c:c + w] for r, c in candidates}
###############################################
# image tiles dict is of form match_point coord:tile at that point
def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1):
    """Calculates the normalisation coefficients of potential match positions,
    then normalises the correlation at these positions, returning the ones
    that still constitute a match.

    :param image_tile_dict: {(row, col): template-sized image tile}
    :param transformed_array: raw correlation scores indexed by position
    :param template: the pattern being matched (2d array)
    :param normed_tolerance: minimum normalised score (rounded to 3 d.p.)
    :returns: list of (row, col) positions passing the normalised test
    """
    template_norm = np.linalg.norm(template)
    # dict.keys() is not indexable under Python 3, so materialise a list.
    match_points = list(image_tile_dict.keys())
    image_norms = {point: np.linalg.norm(image_tile_dict[point]) * template_norm
                   for point in match_points}
    normalised = {point: transformed_array[point[0], point[1]] / image_norms[point]
                  for point in match_points}
    return [point for point, value in normalised.items()
            if np.round(value, decimals=3) >= normed_tolerance]
# image tiles dict is of form match_point coord:tile at that point
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):
    """As :func:`normalise_correlation`, but for the correlation coefficient
    matching method: both template and tiles are mean-subtracted before the
    normalisation coefficients are computed.

    :returns: list of (row, col) positions passing the normalised test
    """
    template_minus_mean = template - np.mean(template)
    template_norm = np.linalg.norm(template_minus_mean)
    # dict.keys() is not indexable under Python 3, so materialise a list.
    match_points = list(image_tile_dict.keys())
    image_norms = {point: (np.linalg.norm(image_tile_dict[point] - np.mean(image_tile_dict[point]))
                           * template_norm)
                   for point in match_points}
    normalised = {point: transformed_array[point[0], point[1]] / image_norms[point]
                  for point in match_points}
    return [point for point, value in normalised.items()
            if np.round(value, decimals=3) >= normed_tolerance]
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
    """As :func:`normalise_correlation`, but for the squared differences
    matching method.

    Reconstructs sum((tile - template)**2) = |tile|^2 - 2*corr + |template|^2
    from the raw correlation in *transformed_array*, and keeps positions whose
    value is below the cutoff ``h * w * 255**2 * sq_diff_tolerance``.

    :returns: list of (row, col) positions below the cutoff
    """
    h, w = template.shape
    template_norm_squared = np.sum(template ** 2)
    # dict.keys() is not indexable under Python 3, so materialise a list.
    match_points = list(image_tile_dict.keys())
    sq_diffs = {point: (-2 * transformed_array[point[0], point[1]]
                        + np.sum(image_tile_dict[point] ** 2)
                        + template_norm_squared)
                for point in match_points}
    cutoff = h * w * 255 ** 2 * sq_diff_tolerance
    return [point for point, value in sq_diffs.items()
            if np.round(value, decimals=3) <= cutoff]
|
ten10solutions/Geist
|
geist/match_position_finder_helpers.py
|
normalise_correlation
|
python
|
def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1):
template_norm = np.linalg.norm(template)
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)])*template_norm for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
#points_from_transformed_array = [(match[0] + h - 1, match[1] + w - 1) for match in match_points]
image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}
result = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
return result.keys()
|
Calculates the normalisation coefficients of potential match positions
Then normalises the correlation at these positions, and returns them
if they do indeed constitute a match
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/match_position_finder_helpers.py#L44-L57
| null |
import numpy as np
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666):
    """To prevent prohibitively slow calculation of the normalisation
    coefficient at every image position, locate candidate match points first
    so that only those need normalising.

    The expected raw score of a perfect match is derived from the matching
    method's definition; positions whose score (rounded to 3 d.p.) lies in
    [expected * raw_tolerance, expected / raw_tolerance] are candidates.

    :returns: array of (row, col) index pairs
    :raises ValueError: for an unrecognised *method*
    """
    if method == 'correlation':
        expected = np.sum(template ** 2)
    elif method == 'squared difference':
        expected = 0
    elif method == 'correlation coefficient':
        centred = template - np.mean(template)
        expected = np.sum(centred ** 2)
    else:
        raise ValueError('Matching method not implemented')
    rounded = np.round(transformed_array, decimals=3)
    in_range = ((rounded >= expected * raw_tolerance) &
                (rounded <= expected * (1. / raw_tolerance)))
    return np.transpose(in_range.nonzero())
# correlation coefficient matches at top left- perfect for tiling
# correlation matches to bottom right- so requires transformation for tiling
def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):
if method not in ['correlation', 'correlation coefficient', 'squared difference']:
raise ValueError('Matching method not implemented')
h, w = template.shape
match_points = find_potential_match_regions(template, transformed_array, method=method, raw_tolerance=raw_tolerance)
match_points = [(match[0], match[1]) for match in match_points]
# create tile for each match point- use dict so we know which match point it applies to
# match point here is position of top left pixel of tile
image_tiles_dict = {match_points[i]:image[match_points[i][0]:match_points[i][0]+h,match_points[i][1]:match_points[i][1]+w] for i in range(len(match_points))}
return image_tiles_dict
###############################################
# image tiles dict is of form match_point coord:tile at that point
# image tiles dict is of form match_point coord:tile at that point
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):
"""As above, but for when the correlation coefficient matching method is used
"""
template_mean = np.mean(template)
template_minus_mean = template - template_mean
template_norm = np.linalg.norm(template_minus_mean)
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)]- np.mean(image_tile_dict[(x,y)]))*template_norm for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
return normalised_matches.keys()
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
"""As above, but for when the squared differences matching method is used
"""
template_norm_squared = np.sum(template**2)
image_norms_squared = {(x,y):np.sum(image_tile_dict[(x,y)]**2) for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:-2*transformed_array[match_points[i][0], match_points[i][1]] + image_norms_squared[match_points[i]] + template_norm_squared for i in range(len(match_points))}
#print image_matches_normalised
cutoff = h*w*255**2*sq_diff_tolerance
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) <= cutoff}
return normalised_matches.keys()
|
ten10solutions/Geist
|
geist/match_position_finder_helpers.py
|
normalise_correlation_coefficient
|
python
|
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):
template_mean = np.mean(template)
template_minus_mean = template - template_mean
template_norm = np.linalg.norm(template_minus_mean)
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)]- np.mean(image_tile_dict[(x,y)]))*template_norm for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
return normalised_matches.keys()
|
As above, but for when the correlation coefficient matching method is used
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/match_position_finder_helpers.py#L61-L73
| null |
import numpy as np
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666):
"""To prevent prohibitively slow calculation of normalisation coefficient at each point in image
find potential match points, and normalise these only these.
This function uses the definitions of the matching functions to calculate the expected match value
and finds positions in the transformed array matching these- normalisation will then eliminate false positives
"""
if method == 'correlation':
match_value = np.sum(template**2) # this will be the value of the match in the
elif method == 'squared difference':
match_value = 0
elif method == 'correlation coefficient':
temp_minus_mean = template - np.mean(template)
match_value = np.sum(temp_minus_mean**2)
else:
raise ValueError('Matching method not implemented')
condition = ((np.round(transformed_array, decimals=3)>=match_value*raw_tolerance) &
(np.round(transformed_array, decimals=3)<=match_value*(1./raw_tolerance)))
return np.transpose(condition.nonzero())# trsnposition and omparison above take most time
# correlation coefficient matches at top left- perfect for tiling
# correlation matches to bottom right- so requires transformation for tiling
def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):
if method not in ['correlation', 'correlation coefficient', 'squared difference']:
raise ValueError('Matching method not implemented')
h, w = template.shape
match_points = find_potential_match_regions(template, transformed_array, method=method, raw_tolerance=raw_tolerance)
match_points = [(match[0], match[1]) for match in match_points]
# create tile for each match point- use dict so we know which match point it applies to
# match point here is position of top left pixel of tile
image_tiles_dict = {match_points[i]:image[match_points[i][0]:match_points[i][0]+h,match_points[i][1]:match_points[i][1]+w] for i in range(len(match_points))}
return image_tiles_dict
###############################################
# image tiles dict is of form match_point coord:tile at that point
def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1):
"""Calculates the normalisation coefficients of potential match positions
Then normalises the correlation at these positions, and returns them
if they do indeed constitute a match
"""
template_norm = np.linalg.norm(template)
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)])*template_norm for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
#points_from_transformed_array = [(match[0] + h - 1, match[1] + w - 1) for match in match_points]
image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}
result = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
return result.keys()
# image tiles dict is of form match_point coord:tile at that point
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
"""As above, but for when the squared differences matching method is used
"""
template_norm_squared = np.sum(template**2)
image_norms_squared = {(x,y):np.sum(image_tile_dict[(x,y)]**2) for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:-2*transformed_array[match_points[i][0], match_points[i][1]] + image_norms_squared[match_points[i]] + template_norm_squared for i in range(len(match_points))}
#print image_matches_normalised
cutoff = h*w*255**2*sq_diff_tolerance
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) <= cutoff}
return normalised_matches.keys()
|
ten10solutions/Geist
|
geist/match_position_finder_helpers.py
|
calculate_squared_differences
|
python
|
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
template_norm_squared = np.sum(template**2)
image_norms_squared = {(x,y):np.sum(image_tile_dict[(x,y)]**2) for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:-2*transformed_array[match_points[i][0], match_points[i][1]] + image_norms_squared[match_points[i]] + template_norm_squared for i in range(len(match_points))}
#print image_matches_normalised
cutoff = h*w*255**2*sq_diff_tolerance
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) <= cutoff}
return normalised_matches.keys()
|
As above, but for when the squared differences matching method is used
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/match_position_finder_helpers.py#L77-L89
| null |
import numpy as np
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666):
"""To prevent prohibitively slow calculation of normalisation coefficient at each point in image
find potential match points, and normalise these only these.
This function uses the definitions of the matching functions to calculate the expected match value
and finds positions in the transformed array matching these- normalisation will then eliminate false positives
"""
if method == 'correlation':
match_value = np.sum(template**2) # this will be the value of the match in the
elif method == 'squared difference':
match_value = 0
elif method == 'correlation coefficient':
temp_minus_mean = template - np.mean(template)
match_value = np.sum(temp_minus_mean**2)
else:
raise ValueError('Matching method not implemented')
condition = ((np.round(transformed_array, decimals=3)>=match_value*raw_tolerance) &
(np.round(transformed_array, decimals=3)<=match_value*(1./raw_tolerance)))
return np.transpose(condition.nonzero())# trsnposition and omparison above take most time
# correlation coefficient matches at top left- perfect for tiling
# correlation matches to bottom right- so requires transformation for tiling
def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):
if method not in ['correlation', 'correlation coefficient', 'squared difference']:
raise ValueError('Matching method not implemented')
h, w = template.shape
match_points = find_potential_match_regions(template, transformed_array, method=method, raw_tolerance=raw_tolerance)
match_points = [(match[0], match[1]) for match in match_points]
# create tile for each match point- use dict so we know which match point it applies to
# match point here is position of top left pixel of tile
image_tiles_dict = {match_points[i]:image[match_points[i][0]:match_points[i][0]+h,match_points[i][1]:match_points[i][1]+w] for i in range(len(match_points))}
return image_tiles_dict
###############################################
# image tiles dict is of form match_point coord:tile at that point
def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1):
"""Calculates the normalisation coefficients of potential match positions
Then normalises the correlation at these positions, and returns them
if they do indeed constitute a match
"""
template_norm = np.linalg.norm(template)
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)])*template_norm for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
#points_from_transformed_array = [(match[0] + h - 1, match[1] + w - 1) for match in match_points]
image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}
result = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
return result.keys()
# image tiles dict is of form match_point coord:tile at that point
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):
"""As above, but for when the correlation coefficient matching method is used
"""
template_mean = np.mean(template)
template_minus_mean = template - template_mean
template_norm = np.linalg.norm(template_minus_mean)
image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)]- np.mean(image_tile_dict[(x,y)]))*template_norm for (x,y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transofrm back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
return normalised_matches.keys()
|
ten10solutions/Geist
|
geist/backends/_x11_common.py
|
GeistXBase.create_process
|
python
|
def create_process(self, command, shell=True, stdout=None, stderr=None,
                   env=None):
    """Execute a process using subprocess.Popen, forcing DISPLAY to this
    backend's display so the child talks to the same X server.

    A copy of os.environ is used when *env* is None; a caller-supplied
    *env* dict is modified in place.
    """
    if env is None:
        env = dict(os.environ)
    env['DISPLAY'] = self.display
    return subprocess.Popen(command, shell=shell,
                            stdout=stdout, stderr=stderr,
                            env=env)
|
Execute a process using subprocess.Popen, setting the backend's DISPLAY
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/backends/_x11_common.py#L54-L63
| null |
class GeistXBase(object):
    """Shared X11 backend behaviour: keyboard and mouse synthesis via XTEST.

    Relies on module-level names imported elsewhere in this file:
    ``keysyms``, ``ooxcb``, ``_ActionsTransaction`` and the X event
    constants (KeyPress, KeyRelease, ButtonPress, ButtonRelease,
    MotionNotify).
    """

    # X keysym name -> keysym value.
    KEY_NAME_TO_CODE = keysyms
    # Lower-cased names for case-insensitive lookup.
    # NOTE(review): dict.iteritems is Python 2 only.
    KEY_NAME_TO_CODE_IGNORE_CASE = {name.lower(): value
                                    for name, value in keysyms.iteritems()}

    def __init__(self, **kwargs):
        # Connect to the display named by the ``display`` kwarg (':0' by
        # default) and cache the root window of the preferred screen.
        display = kwargs.get('display', ':0')
        self._display = display
        self._conn = ooxcb.connect(display)
        self._root = self._conn.setup.roots[self._conn.pref_screen].root

    @property
    def display(self):
        # The display string this backend connected to (e.g. ':0').
        return self._display

    def actions_transaction(self):
        # Batch several input actions together; _ActionsTransaction is
        # defined elsewhere in this module.
        return _ActionsTransaction(self)

    def _get_key_code_from_name(self, name):
        # Map a human-readable key name to an X key code; 'shift' is
        # aliased to the left shift keysym.
        if name == 'shift':
            symb = GeistXBase.KEY_NAME_TO_CODE['Shift_L']
        elif name in GeistXBase.KEY_NAME_TO_CODE:
            symb = GeistXBase.KEY_NAME_TO_CODE[name]
        elif name.lower() in GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE:
            # NOTE(review): indexes the lower-cased table with the original
            # mixed-case name -- presumably should be name.lower(); as
            # written this raises KeyError for names that only match
            # case-insensitively. Confirm before changing.
            symb = GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE[name]
        else:
            raise ValueError('unhandled key %r' % (name,))
        return self._conn.keysyms.get_keycode(symb)

    def key_down(self, name):
        # Synthesise a key press (without release) via XTEST.
        key_code = self._get_key_code_from_name(name)
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                KeyPress,
                detail=key_code
            )

    def key_up(self, name):
        # Synthesise a key release via XTEST.
        key_code = self._get_key_code_from_name(name)
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                KeyRelease,
                detail=key_code
            )

    def button_down(self, button_num):
        # Synthesise a mouse button press.
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                ButtonPress,
                detail=button_num
            )

    def button_up(self, button_num):
        # Synthesise a mouse button release.
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                ButtonRelease,
                detail=button_num
            )

    def move(self, point):
        # Warp the pointer to absolute root-window coordinates (x, y).
        x, y = point
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                MotionNotify,
                rootX=x,
                rootY=y,
            )

    def cursor_position(self):
        # Current pointer position as (x, y) in root-window coordinates.
        reply = self._root.query_pointer().reply()
        return reply.root_x, reply.root_y

    def close(self):
        # Idempotent: disconnect and drop the connection attribute so a
        # second close (or __del__ after close) is a no-op.
        if hasattr(self, '_conn'):
            self._conn.disconnect()
            del self._conn

    def __del__(self):
        self.close()
|
ten10solutions/Geist
|
geist/matchers.py
|
match_via_correlation
|
python
|
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = normalise_correlation(match_position_dict, correlation, template, normed_tolerance=normed_tolerance)
return results
|
Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/matchers.py#L8-L22
|
[
"def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):\n if method not in ['correlation', 'correlation coefficient', 'squared difference']:\n raise ValueError('Matching method not implemented')\n h, w = template.shape\n match_points = find_potential_match_regions(template, transformed_array, method=method, raw_tolerance=raw_tolerance)\n match_points = [(match[0], match[1]) for match in match_points]\n # create tile for each match point- use dict so we know which match point it applies to\n # match point here is position of top left pixel of tile\n image_tiles_dict = {match_points[i]:image[match_points[i][0]:match_points[i][0]+h,match_points[i][1]:match_points[i][1]+w] for i in range(len(match_points))}\n return image_tiles_dict\n",
"def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1):\n \"\"\"Calculates the normalisation coefficients of potential match positions\n Then normalises the correlation at these positions, and returns them\n if they do indeed constitute a match\n \"\"\"\n template_norm = np.linalg.norm(template)\n image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)])*template_norm for (x,y) in image_tile_dict.keys()}\n match_points = image_tile_dict.keys()\n # for correlation, then need to transofrm back to get correct value for division\n h, w = template.shape\n #points_from_transformed_array = [(match[0] + h - 1, match[1] + w - 1) for match in match_points]\n image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}\n result = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}\n return result.keys()\n"
] |
from .match_position_finder_helpers import get_tiles_at_potential_match_regions, normalise_correlation, normalise_correlation_coefficient, find_potential_match_regions
from scipy.signal import fftconvolve
from scipy.ndimage.measurements import label, find_objects
import numpy as np
# both these methods return array of points giving bottom right coordinate of match
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)
return results
def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):
""" Matching algorithm based on 2-dimensional version of Pearson product-moment correlation coefficient.
This is more robust in the case where the match might be scaled or slightly rotated.
From experimentation, this method is less prone to false positives than the correlation method.
"""
h, w = image.shape
th, tw = template.shape
temp_mean = np.mean(template)
temp_minus_mean = template - temp_mean
convolution = fftconvolve(image, temp_minus_mean[::-1,::-1])
convolution = convolution[th-1:h, tw-1:w]
match_position_dict = get_tiles_at_potential_match_regions(image, template, convolution, method='correlation coefficient', raw_tolerance=raw_tolerance)
# this is empty, so think condition is wrong
results = normalise_correlation_coefficient(match_position_dict, convolution, template, normed_tolerance=normed_tolerance)
return results
def fuzzy_match(image, template, normed_tolerance=None, raw_tolerance=None, method='correlation'):
"""Determines, using one of two methods, whether a match(es) is present and returns the positions of
the bottom right corners of the matches.
Fuzzy matches returns regions, so the center of each region is returned as the final match location
USE THIS FUNCTION IF you need to match, e.g. the same image but rendered slightly different with respect to
anti aliasing; the same image on a number of different backgrounds.
The method is the name of the matching method used, the details of this do not matter. Use the default method
unless you have too many false positives, in this case, use the method 'correlation coefficient.' The
correlation coefficient method can also be more robust at matching when the match might not be exact.
The raw_tolerance is the proportion of the value at match positions (i.e. the value returned for an exact match)
that we count as a match. For fuzzy matching, this value will not be exactly the value returned for an exact match
N. B. Lowering raw_tolerance increases the number of potential match tiles requiring normalisation.
This DRAMATICALLY slows down matching as normalisation (a process which eliminates false positives)
The normed_tolerance is how far a potential match value can differ from one after normalisation.
The tolerance values indicated below are from a short investigation, looking to minimise missing items we wish to match,
as all as false positives which inevitably occur when performing fuzzy matching. To generate these values, we
tested maching letters with different type of antialiasing on a number of backgrounds.
"""
if method == 'correlation':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.95
results = np.array(match_via_correlation(image, template, raw_tolerance=raw_tolerance, normed_tolerance=normed_tolerance))
elif method == 'correlation coefficient':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.95
results = np.array(match_via_correlation_coefficient(image, template, raw_tolerance=raw_tolerance, normed_tolerance=normed_tolerance))
elif method == 'squared difference':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.05
results = np.array(match_via_squared_difference(image, template, raw_tolerance=raw_tolerance, sq_diff_tolerance=normed_tolerance))
h, w = image.shape
th, tw = template.shape
results = np.array([(result[0], result[1]) for result in results])
#match_x, match_y = int(np.mean(results[:,1])), int(np.mean(results[:,0]))
results_aggregated_mean_match_position = match_positions((h,w), results)
return results_aggregated_mean_match_position
def match_positions(shape, list_of_coords):
""" In cases where we have multiple matches, each highlighted by a region of coordinates,
we need to separate matches, and find mean of each to return as match position
"""
match_array = np.zeros(shape)
try:
# excpetion hit on this line if nothing in list_of_coords- i.e. no matches
match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1
labelled = label(match_array)
objects = find_objects(labelled[0])
coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]
final_positions = [(int(np.mean(coords[i]['x'])),int(np.mean(coords[i]['y']))) for i in range(len(coords))]
return final_positions
except IndexError:
print 'no matches found'
# this error occurs if no matches are found
return []
## not what we want a all!!! only will take exact matches, defeating entire point
def post_process(image, template, list_of_coords):
h, w = template.shape
for x, y in list_of_coords:
print x-h + 1, y-w + 1
sub_image = image[x-h + 1:x + 1, y-w + 1:y + 1]
print sub_image.shape, template.shape, x, y
if not np.allclose(template, sub_image):
list_of_coords.remove((x,y))
return list_of_coords
def to_rgb(im):
return np.dstack([im.astype(np.uint8)] * 3).copy(order='C')
def highlight_matched_region_no_normalisation(image, template, method='correlation', raw_tolerance=0.666):
conv = fftconvolve(image, template[::-1,::-1])
th, tw = template.shape
r = find_potential_match_regions(template, conv, method=method, raw_tolerance=raw_tolerance)
r_in_image = [(r_x, r_y) for (r_x, r_y) in r if (r_x < image.shape[0] and r_y < image.shape[1])]
im_rgb = to_rgb(image)
for (x,y) in r_in_image:
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
def highlight_matched_region_normalised(image, shape, list_of_coords):
th, tw = shape
im_rgb = to_rgb(image)
for (x,y) in list_of_coords:
#print (x,y)
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
|
ten10solutions/Geist
|
geist/matchers.py
|
match_via_squared_difference
|
python
|
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)
return results
|
Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/matchers.py#L25-L39
|
[
"def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):\n if method not in ['correlation', 'correlation coefficient', 'squared difference']:\n raise ValueError('Matching method not implemented')\n h, w = template.shape\n match_points = find_potential_match_regions(template, transformed_array, method=method, raw_tolerance=raw_tolerance)\n match_points = [(match[0], match[1]) for match in match_points]\n # create tile for each match point- use dict so we know which match point it applies to\n # match point here is position of top left pixel of tile\n image_tiles_dict = {match_points[i]:image[match_points[i][0]:match_points[i][0]+h,match_points[i][1]:match_points[i][1]+w] for i in range(len(match_points))}\n return image_tiles_dict\n"
] |
from .match_position_finder_helpers import get_tiles_at_potential_match_regions, normalise_correlation, normalise_correlation_coefficient, find_potential_match_regions
from scipy.signal import fftconvolve
from scipy.ndimage.measurements import label, find_objects
import numpy as np
# both these methods return array of points giving bottom right coordinate of match
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = normalise_correlation(match_position_dict, correlation, template, normed_tolerance=normed_tolerance)
return results
def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):
""" Matching algorithm based on 2-dimensional version of Pearson product-moment correlation coefficient.
This is more robust in the case where the match might be scaled or slightly rotated.
From experimentation, this method is less prone to false positives than the correlation method.
"""
h, w = image.shape
th, tw = template.shape
temp_mean = np.mean(template)
temp_minus_mean = template - temp_mean
convolution = fftconvolve(image, temp_minus_mean[::-1,::-1])
convolution = convolution[th-1:h, tw-1:w]
match_position_dict = get_tiles_at_potential_match_regions(image, template, convolution, method='correlation coefficient', raw_tolerance=raw_tolerance)
# this is empty, so think condition is wrong
results = normalise_correlation_coefficient(match_position_dict, convolution, template, normed_tolerance=normed_tolerance)
return results
def fuzzy_match(image, template, normed_tolerance=None, raw_tolerance=None, method='correlation'):
"""Determines, using one of two methods, whether a match(es) is present and returns the positions of
the bottom right corners of the matches.
Fuzzy matches returns regions, so the center of each region is returned as the final match location
USE THIS FUNCTION IF you need to match, e.g. the same image but rendered slightly different with respect to
anti aliasing; the same image on a number of different backgrounds.
The method is the name of the matching method used, the details of this do not matter. Use the default method
unless you have too many false positives, in this case, use the method 'correlation coefficient.' The
correlation coefficient method can also be more robust at matching when the match might not be exact.
The raw_tolerance is the proportion of the value at match positions (i.e. the value returned for an exact match)
that we count as a match. For fuzzy matching, this value will not be exactly the value returned for an exact match
N. B. Lowering raw_tolerance increases the number of potential match tiles requiring normalisation.
This DRAMATICALLY slows down matching as normalisation (a process which eliminates false positives)
The normed_tolerance is how far a potential match value can differ from one after normalisation.
The tolerance values indicated below are from a short investigation, looking to minimise missing items we wish to match,
as all as false positives which inevitably occur when performing fuzzy matching. To generate these values, we
tested maching letters with different type of antialiasing on a number of backgrounds.
"""
if method == 'correlation':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.95
results = np.array(match_via_correlation(image, template, raw_tolerance=raw_tolerance, normed_tolerance=normed_tolerance))
elif method == 'correlation coefficient':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.95
results = np.array(match_via_correlation_coefficient(image, template, raw_tolerance=raw_tolerance, normed_tolerance=normed_tolerance))
elif method == 'squared difference':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.05
results = np.array(match_via_squared_difference(image, template, raw_tolerance=raw_tolerance, sq_diff_tolerance=normed_tolerance))
h, w = image.shape
th, tw = template.shape
results = np.array([(result[0], result[1]) for result in results])
#match_x, match_y = int(np.mean(results[:,1])), int(np.mean(results[:,0]))
results_aggregated_mean_match_position = match_positions((h,w), results)
return results_aggregated_mean_match_position
def match_positions(shape, list_of_coords):
""" In cases where we have multiple matches, each highlighted by a region of coordinates,
we need to separate matches, and find mean of each to return as match position
"""
match_array = np.zeros(shape)
try:
# excpetion hit on this line if nothing in list_of_coords- i.e. no matches
match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1
labelled = label(match_array)
objects = find_objects(labelled[0])
coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]
final_positions = [(int(np.mean(coords[i]['x'])),int(np.mean(coords[i]['y']))) for i in range(len(coords))]
return final_positions
except IndexError:
print 'no matches found'
# this error occurs if no matches are found
return []
## not what we want a all!!! only will take exact matches, defeating entire point
def post_process(image, template, list_of_coords):
h, w = template.shape
for x, y in list_of_coords:
print x-h + 1, y-w + 1
sub_image = image[x-h + 1:x + 1, y-w + 1:y + 1]
print sub_image.shape, template.shape, x, y
if not np.allclose(template, sub_image):
list_of_coords.remove((x,y))
return list_of_coords
def to_rgb(im):
return np.dstack([im.astype(np.uint8)] * 3).copy(order='C')
def highlight_matched_region_no_normalisation(image, template, method='correlation', raw_tolerance=0.666):
conv = fftconvolve(image, template[::-1,::-1])
th, tw = template.shape
r = find_potential_match_regions(template, conv, method=method, raw_tolerance=raw_tolerance)
r_in_image = [(r_x, r_y) for (r_x, r_y) in r if (r_x < image.shape[0] and r_y < image.shape[1])]
im_rgb = to_rgb(image)
for (x,y) in r_in_image:
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
def highlight_matched_region_normalised(image, shape, list_of_coords):
th, tw = shape
im_rgb = to_rgb(image)
for (x,y) in list_of_coords:
#print (x,y)
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
|
ten10solutions/Geist
|
geist/matchers.py
|
match_via_correlation_coefficient
|
python
|
def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):
h, w = image.shape
th, tw = template.shape
temp_mean = np.mean(template)
temp_minus_mean = template - temp_mean
convolution = fftconvolve(image, temp_minus_mean[::-1,::-1])
convolution = convolution[th-1:h, tw-1:w]
match_position_dict = get_tiles_at_potential_match_regions(image, template, convolution, method='correlation coefficient', raw_tolerance=raw_tolerance)
# this is empty, so think condition is wrong
results = normalise_correlation_coefficient(match_position_dict, convolution, template, normed_tolerance=normed_tolerance)
return results
|
Matching algorithm based on 2-dimensional version of Pearson product-moment correlation coefficient.
This is more robust in the case where the match might be scaled or slightly rotated.
From experimentation, this method is less prone to false positives than the correlation method.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/matchers.py#L43-L59
|
[
"def get_tiles_at_potential_match_regions(image, template, transformed_array, method='correlation', raw_tolerance=0.001):\n if method not in ['correlation', 'correlation coefficient', 'squared difference']:\n raise ValueError('Matching method not implemented')\n h, w = template.shape\n match_points = find_potential_match_regions(template, transformed_array, method=method, raw_tolerance=raw_tolerance)\n match_points = [(match[0], match[1]) for match in match_points]\n # create tile for each match point- use dict so we know which match point it applies to\n # match point here is position of top left pixel of tile\n image_tiles_dict = {match_points[i]:image[match_points[i][0]:match_points[i][0]+h,match_points[i][1]:match_points[i][1]+w] for i in range(len(match_points))}\n return image_tiles_dict\n",
"def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):\n \"\"\"As above, but for when the correlation coefficient matching method is used\n \"\"\"\n template_mean = np.mean(template)\n template_minus_mean = template - template_mean\n template_norm = np.linalg.norm(template_minus_mean)\n image_norms = {(x,y):np.linalg.norm(image_tile_dict[(x,y)]- np.mean(image_tile_dict[(x,y)]))*template_norm for (x,y) in image_tile_dict.keys()}\n match_points = image_tile_dict.keys()\n # for correlation, then need to transofrm back to get correct value for division\n h, w = template.shape\n image_matches_normalised = {match_points[i]:transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]] for i in range(len(match_points))}\n normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}\n return normalised_matches.keys()\n"
] |
from .match_position_finder_helpers import get_tiles_at_potential_match_regions, normalise_correlation, normalise_correlation_coefficient, find_potential_match_regions
from scipy.signal import fftconvolve
from scipy.ndimage.measurements import label, find_objects
import numpy as np
# both these methods return array of points giving bottom right coordinate of match
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = normalise_correlation(match_position_dict, correlation, template, normed_tolerance=normed_tolerance)
return results
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)
return results
def fuzzy_match(image, template, normed_tolerance=None, raw_tolerance=None, method='correlation'):
"""Determines, using one of two methods, whether a match(es) is present and returns the positions of
the bottom right corners of the matches.
Fuzzy matches returns regions, so the center of each region is returned as the final match location
USE THIS FUNCTION IF you need to match, e.g. the same image but rendered slightly different with respect to
anti aliasing; the same image on a number of different backgrounds.
The method is the name of the matching method used, the details of this do not matter. Use the default method
unless you have too many false positives, in this case, use the method 'correlation coefficient.' The
correlation coefficient method can also be more robust at matching when the match might not be exact.
The raw_tolerance is the proportion of the value at match positions (i.e. the value returned for an exact match)
that we count as a match. For fuzzy matching, this value will not be exactly the value returned for an exact match
N. B. Lowering raw_tolerance increases the number of potential match tiles requiring normalisation.
This DRAMATICALLY slows down matching as normalisation (a process which eliminates false positives)
The normed_tolerance is how far a potential match value can differ from one after normalisation.
The tolerance values indicated below are from a short investigation, looking to minimise missing items we wish to match,
as all as false positives which inevitably occur when performing fuzzy matching. To generate these values, we
tested maching letters with different type of antialiasing on a number of backgrounds.
"""
if method == 'correlation':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.95
results = np.array(match_via_correlation(image, template, raw_tolerance=raw_tolerance, normed_tolerance=normed_tolerance))
elif method == 'correlation coefficient':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.95
results = np.array(match_via_correlation_coefficient(image, template, raw_tolerance=raw_tolerance, normed_tolerance=normed_tolerance))
elif method == 'squared difference':
if not raw_tolerance:
raw_tolerance = 0.95
if not normed_tolerance:
normed_tolerance = 0.05
results = np.array(match_via_squared_difference(image, template, raw_tolerance=raw_tolerance, sq_diff_tolerance=normed_tolerance))
h, w = image.shape
th, tw = template.shape
results = np.array([(result[0], result[1]) for result in results])
#match_x, match_y = int(np.mean(results[:,1])), int(np.mean(results[:,0]))
results_aggregated_mean_match_position = match_positions((h,w), results)
return results_aggregated_mean_match_position
def match_positions(shape, list_of_coords):
""" In cases where we have multiple matches, each highlighted by a region of coordinates,
we need to separate matches, and find mean of each to return as match position
"""
match_array = np.zeros(shape)
try:
# excpetion hit on this line if nothing in list_of_coords- i.e. no matches
match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1
labelled = label(match_array)
objects = find_objects(labelled[0])
coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]
final_positions = [(int(np.mean(coords[i]['x'])),int(np.mean(coords[i]['y']))) for i in range(len(coords))]
return final_positions
except IndexError:
print 'no matches found'
# this error occurs if no matches are found
return []
## not what we want a all!!! only will take exact matches, defeating entire point
def post_process(image, template, list_of_coords):
h, w = template.shape
for x, y in list_of_coords:
print x-h + 1, y-w + 1
sub_image = image[x-h + 1:x + 1, y-w + 1:y + 1]
print sub_image.shape, template.shape, x, y
if not np.allclose(template, sub_image):
list_of_coords.remove((x,y))
return list_of_coords
def to_rgb(im):
return np.dstack([im.astype(np.uint8)] * 3).copy(order='C')
def highlight_matched_region_no_normalisation(image, template, method='correlation', raw_tolerance=0.666):
conv = fftconvolve(image, template[::-1,::-1])
th, tw = template.shape
r = find_potential_match_regions(template, conv, method=method, raw_tolerance=raw_tolerance)
r_in_image = [(r_x, r_y) for (r_x, r_y) in r if (r_x < image.shape[0] and r_y < image.shape[1])]
im_rgb = to_rgb(image)
for (x,y) in r_in_image:
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
def highlight_matched_region_normalised(image, shape, list_of_coords):
th, tw = shape
im_rgb = to_rgb(image)
for (x,y) in list_of_coords:
#print (x,y)
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
|
ten10solutions/Geist
|
geist/matchers.py
|
fuzzy_match
|
python
|
def fuzzy_match(image, template, normed_tolerance=None, raw_tolerance=None, method='correlation'):
    """Find fuzzy matches of ``template`` in ``image``.

    Runs one of three matching strategies and aggregates the raw hit
    coordinates into one mean position per contiguous match region.

    :param image: 2-d greyscale array to search in.
    :param template: 2-d greyscale array to search for.
    :param normed_tolerance: post-normalisation tolerance; a method-specific
        default is used when falsy.
    :param raw_tolerance: proportion of the exact-match response that still
        counts as a hit; defaults to 0.95 when falsy.
    :param method: 'correlation' (default), 'correlation coefficient' or
        'squared difference'.
    :returns: list of (x, y) aggregated match positions (bottom-right corners).
    :raises ValueError: if ``method`` is not a recognised name (previously an
        unknown method fell through and crashed with ``NameError``).
    """
    if method == 'correlation':
        raw_tolerance = raw_tolerance or 0.95
        normed_tolerance = normed_tolerance or 0.95
        results = np.array(match_via_correlation(
            image, template, raw_tolerance=raw_tolerance,
            normed_tolerance=normed_tolerance))
    elif method == 'correlation coefficient':
        raw_tolerance = raw_tolerance or 0.95
        normed_tolerance = normed_tolerance or 0.95
        results = np.array(match_via_correlation_coefficient(
            image, template, raw_tolerance=raw_tolerance,
            normed_tolerance=normed_tolerance))
    elif method == 'squared difference':
        raw_tolerance = raw_tolerance or 0.95
        normed_tolerance = normed_tolerance or 0.05
        results = np.array(match_via_squared_difference(
            image, template, raw_tolerance=raw_tolerance,
            sq_diff_tolerance=normed_tolerance))
    else:
        raise ValueError('unknown matching method: %r' % (method,))
    h, w = image.shape
    # keep only the (x, y) pair of each result record
    results = np.array([(result[0], result[1]) for result in results])
    # collapse each contiguous region of hits to its mean position
    results_aggregated_mean_match_position = match_positions((h, w), results)
    return results_aggregated_mean_match_position
|
Determines, using one of two methods, whether a match(es) is present and returns the positions of
the bottom right corners of the matches.
Fuzzy matches returns regions, so the center of each region is returned as the final match location
USE THIS FUNCTION IF you need to match, e.g. the same image but rendered slightly different with respect to
anti aliasing; the same image on a number of different backgrounds.
The method is the name of the matching method used, the details of this do not matter. Use the default method
unless you have too many false positives, in this case, use the method 'correlation coefficient.' The
correlation coefficient method can also be more robust at matching when the match might not be exact.
The raw_tolerance is the proportion of the value at match positions (i.e. the value returned for an exact match)
that we count as a match. For fuzzy matching, this value will not be exactly the value returned for an exact match
N. B. Lowering raw_tolerance increases the number of potential match tiles requiring normalisation.
This DRAMATICALLY slows down matching as normalisation (a process which eliminates false positives)
The normed_tolerance is how far a potential match value can differ from one after normalisation.
The tolerance values indicated below are from a short investigation, looking to minimise missing items we wish to match,
as well as false positives which inevitably occur when performing fuzzy matching. To generate these values, we
tested matching letters with different types of antialiasing on a number of backgrounds.
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/matchers.py#L64-L110
|
[
"def match_positions(shape, list_of_coords):\n \"\"\" In cases where we have multiple matches, each highlighted by a region of coordinates,\n we need to separate matches, and find mean of each to return as match position\n \"\"\"\n match_array = np.zeros(shape)\n try:\n # excpetion hit on this line if nothing in list_of_coords- i.e. no matches\n match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1\n labelled = label(match_array)\n objects = find_objects(labelled[0])\n coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]\n final_positions = [(int(np.mean(coords[i]['x'])),int(np.mean(coords[i]['y']))) for i in range(len(coords))]\n return final_positions\n except IndexError:\n print 'no matches found'\n # this error occurs if no matches are found\n return []\n",
"def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):\n \"\"\" Matchihng algorithm based on normalised cross correlation.\n Using this matching prevents false positives occuring for bright patches in the image\n \"\"\"\n h, w = image.shape\n th, tw = template.shape\n # fft based convolution enables fast matching of large images\n correlation = fftconvolve(image, template[::-1,::-1])\n # trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height\n correlation = correlation[th-1:h, tw-1:w]\n # find images regions which are potentially matches\n match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)\n # bright spots in images can lead to false positivies- the normalisation carried out here eliminates those\n results = normalise_correlation(match_position_dict, correlation, template, normed_tolerance=normed_tolerance)\n return results\n",
"def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):\n \"\"\" Matchihng algorithm based on normalised cross correlation.\n Using this matching prevents false positives occuring for bright patches in the image\n \"\"\"\n h, w = image.shape\n th, tw = template.shape\n # fft based convolution enables fast matching of large images\n correlation = fftconvolve(image, template[::-1,::-1])\n # trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height\n correlation = correlation[th-1:h, tw-1:w]\n # find images regions which are potentially matches\n match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)\n # bright spots in images can lead to false positivies- the normalisation carried out here eliminates those\n results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)\n return results\n",
"def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):\n \"\"\" Matching algorithm based on 2-dimensional version of Pearson product-moment correlation coefficient.\n\n This is more robust in the case where the match might be scaled or slightly rotated.\n\n From experimentation, this method is less prone to false positives than the correlation method.\n \"\"\"\n h, w = image.shape\n th, tw = template.shape\n temp_mean = np.mean(template)\n temp_minus_mean = template - temp_mean\n convolution = fftconvolve(image, temp_minus_mean[::-1,::-1])\n convolution = convolution[th-1:h, tw-1:w]\n match_position_dict = get_tiles_at_potential_match_regions(image, template, convolution, method='correlation coefficient', raw_tolerance=raw_tolerance)\n # this is empty, so think condition is wrong\n results = normalise_correlation_coefficient(match_position_dict, convolution, template, normed_tolerance=normed_tolerance)\n return results\n"
] |
from .match_position_finder_helpers import get_tiles_at_potential_match_regions, normalise_correlation, normalise_correlation_coefficient, find_potential_match_regions
from scipy.signal import fftconvolve
from scipy.ndimage.measurements import label, find_objects
import numpy as np
# both these methods return array of points giving bottom right coordinate of match
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
    """Matching algorithm based on normalised cross correlation.

    Normalising the correlation response prevents false positives on bright
    patches of the image.

    :param image: 2-d greyscale array to search in.
    :param template: 2-d greyscale array to search for.
    :param raw_tolerance: fraction of the exact-match response accepted as a
        candidate before normalisation.
    :param normed_tolerance: how close to the exact-match score a normalised
        candidate must be to count as a match.
    :returns: result of ``normalise_correlation`` — surviving match positions.
    """
    h, w = image.shape
    th, tw = template.shape
    # fft based convolution enables fast matching of large images
    correlation = fftconvolve(image, template[::-1,::-1])
    # trim: fftconvolve returns an image of width (Temp_w-1) + Im_w + (Temp_w-1), likewise height
    correlation = correlation[th-1:h, tw-1:w]
    # find image regions which are potential matches
    match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
    # bright spots can lead to false positives - normalisation eliminates those
    results = normalise_correlation(match_position_dict, correlation, template, normed_tolerance=normed_tolerance)
    return results
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
    """Matching via squared differences on raw-correlation candidate tiles.

    NOTE(review): ``calculate_squared_differences`` does not appear in this
    module's import line — calling this function as-is raises ``NameError``;
    confirm the helper is in scope.
    """
    h, w = image.shape
    th, tw = template.shape
    # fft based convolution enables fast matching of large images
    correlation = fftconvolve(image, template[::-1,::-1])
    # trim: fftconvolve returns an image of width (Temp_w-1) + Im_w + (Temp_w-1), likewise height
    correlation = correlation[th-1:h, tw-1:w]
    # find image regions which are potential matches
    match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
    # score each candidate tile by its squared difference from the template
    results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)
    return results
def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):
    """Matching based on a 2-d version of the Pearson product-moment
    correlation coefficient.

    More robust when the match might be scaled or slightly rotated, and less
    prone to false positives than the plain correlation method.
    """
    h, w = image.shape
    th, tw = template.shape
    # correlate against the mean-centred template
    temp_mean = np.mean(template)
    temp_minus_mean = template - temp_mean
    convolution = fftconvolve(image, temp_minus_mean[::-1,::-1])
    # trim: fftconvolve output is larger than the image by template-1 on each axis
    convolution = convolution[th-1:h, tw-1:w]
    match_position_dict = get_tiles_at_potential_match_regions(image, template, convolution, method='correlation coefficient', raw_tolerance=raw_tolerance)
    # NOTE(review): an earlier comment here said the result was empty and the
    # condition may be wrong — verify normalise_correlation_coefficient's threshold.
    results = normalise_correlation_coefficient(match_position_dict, convolution, template, normed_tolerance=normed_tolerance)
    return results
def match_positions(shape, list_of_coords):
    """ In cases where we have multiple matches, each highlighted by a region of coordinates,
    we need to separate matches, and find mean of each to return as match position
    """
    match_array = np.zeros(shape)
    try:
        # exception hit on this line if nothing in list_of_coords - i.e. no matches
        match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1
        # label connected regions of hits, then take each region's bounding box
        labelled = label(match_array)
        objects = find_objects(labelled[0])
        coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]
        # centre of each bounding box is the reported match position
        final_positions = [(int(np.mean(coords[i]['x'])),int(np.mean(coords[i]['y']))) for i in range(len(coords))]
        return final_positions
    except IndexError:
        print 'no matches found'
        # this error occurs if no matches are found
        return []
## not what we want a all!!! only will take exact matches, defeating entire point
def post_process(image, template, list_of_coords):
    """Filter ``list_of_coords`` down to exact template matches.

    NOTE(review): per the comment above its definition this defeats the
    purpose of fuzzy matching, and it mutates ``list_of_coords`` while
    iterating it, which can skip elements — confirm before relying on it.
    """
    h, w = template.shape
    for x, y in list_of_coords:
        print x-h + 1, y-w + 1
        # tile of the image whose bottom-right corner is (x, y)
        sub_image = image[x-h + 1:x + 1, y-w + 1:y + 1]
        print sub_image.shape, template.shape, x, y
        if not np.allclose(template, sub_image):
            list_of_coords.remove((x,y))
    return list_of_coords
def to_rgb(im):
    """Replicate a greyscale image into three identical uint8 channels,
    returned as a C-contiguous array."""
    grey = im.astype(np.uint8)
    return np.repeat(grey[:, :, np.newaxis], 3, axis=2).copy(order='C')
def highlight_matched_region_no_normalisation(image, template, method='correlation', raw_tolerance=0.666):
    """Return an RGB copy of ``image`` with every raw (un-normalised)
    candidate match region painted teal (0, 100, 100).

    :param raw_tolerance: fraction of the exact-match response that still
        counts as a candidate.
    """
    # raw convolution response of the template over the whole image
    conv = fftconvolve(image, template[::-1,::-1])
    th, tw = template.shape
    r = find_potential_match_regions(template, conv, method=method, raw_tolerance=raw_tolerance)
    # drop candidates outside the image (fftconvolve output is larger than the image)
    r_in_image = [(r_x, r_y) for (r_x, r_y) in r if (r_x < image.shape[0] and r_y < image.shape[1])]
    im_rgb = to_rgb(image)
    for (x,y) in r_in_image:
        try:
            # paint the template-sized box whose bottom-right corner is (x, y)
            im_rgb[x-th:x,y-tw:y] = 0, 100, 100
        except IndexError:
            # fall back to marking the single pixel
            im_rgb[x,y] = 0, 100, 100
    return im_rgb
def highlight_matched_region_normalised(image, shape, list_of_coords):
    """Return an RGB copy of ``image`` with a teal box of the given template
    ``shape`` painted at each normalised match coordinate (coordinates are
    bottom-right corners)."""
    th, tw = shape
    im_rgb = to_rgb(image)
    for (x,y) in list_of_coords:
        try:
            # paint the template-sized box ending at (x, y)
            im_rgb[x-th:x,y-tw:y] = 0, 100, 100
        except IndexError:
            # fall back to marking the single pixel
            im_rgb[x,y] = 0, 100, 100
    return im_rgb
|
ten10solutions/Geist
|
geist/matchers.py
|
match_positions
|
python
|
def match_positions(shape, list_of_coords):
    """Separate multiple match regions and return the mean position of each.

    :param shape: (height, width) of the searched image.
    :param list_of_coords: array of (row, col) hit coordinates.
    :returns: list of (x, y) mean positions, or ``[]`` when there are no hits.
    """
    match_array = np.zeros(shape)
    try:
        # exception hit on this line if nothing in list_of_coords - i.e. no matches
        match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1
        # label connected regions of hits, then take each region's bounding box
        labelled = label(match_array)
        objects = find_objects(labelled[0])
        coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]
        # centre of each bounding box is the reported match position
        final_positions = [(int(np.mean(coords[i]['x'])),int(np.mean(coords[i]['y']))) for i in range(len(coords))]
        return final_positions
    except IndexError:
        print 'no matches found'
        # this error occurs if no matches are found
        return []
|
In cases where we have multiple matches, each highlighted by a region of coordinates,
we need to separate matches, and find mean of each to return as match position
|
train
|
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/matchers.py#L114-L130
| null |
from .match_position_finder_helpers import get_tiles_at_potential_match_regions, normalise_correlation, normalise_correlation_coefficient, find_potential_match_regions
from scipy.signal import fftconvolve
from scipy.ndimage.measurements import label, find_objects
import numpy as np
# both these methods return array of points giving bottom right coordinate of match
def match_via_correlation(image, template, raw_tolerance=1, normed_tolerance=0.9):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = normalise_correlation(match_position_dict, correlation, template, normed_tolerance=normed_tolerance)
return results
def match_via_squared_difference(image, template, raw_tolerance=1, sq_diff_tolerance=0.1):
""" Matchihng algorithm based on normalised cross correlation.
Using this matching prevents false positives occuring for bright patches in the image
"""
h, w = image.shape
th, tw = template.shape
# fft based convolution enables fast matching of large images
correlation = fftconvolve(image, template[::-1,::-1])
# trim the returned image, fftconvolve returns an image of width: (Temp_w-1) + Im_w + (Temp_w -1), likewise height
correlation = correlation[th-1:h, tw-1:w]
# find images regions which are potentially matches
match_position_dict = get_tiles_at_potential_match_regions(image, template, correlation, raw_tolerance=raw_tolerance)
# bright spots in images can lead to false positivies- the normalisation carried out here eliminates those
results = calculate_squared_differences(match_position_dict, correlation, template, sq_diff_tolerance=sq_diff_tolerance)
return results
def match_via_correlation_coefficient(image, template, raw_tolerance=1, normed_tolerance=0.9):
""" Matching algorithm based on 2-dimensional version of Pearson product-moment correlation coefficient.
This is more robust in the case where the match might be scaled or slightly rotated.
From experimentation, this method is less prone to false positives than the correlation method.
"""
h, w = image.shape
th, tw = template.shape
temp_mean = np.mean(template)
temp_minus_mean = template - temp_mean
convolution = fftconvolve(image, temp_minus_mean[::-1,::-1])
convolution = convolution[th-1:h, tw-1:w]
match_position_dict = get_tiles_at_potential_match_regions(image, template, convolution, method='correlation coefficient', raw_tolerance=raw_tolerance)
# this is empty, so think condition is wrong
results = normalise_correlation_coefficient(match_position_dict, convolution, template, normed_tolerance=normed_tolerance)
return results
def fuzzy_match(image, template, normed_tolerance=None, raw_tolerance=None, method='correlation'):
    """Determine whether fuzzy match(es) of ``template`` are present in
    ``image`` and return the positions of the bottom-right corners.

    Fuzzy matching yields regions; the centre of each region is returned as
    the final match location. USE THIS FUNCTION IF you need to match, e.g.,
    the same image rendered with different anti-aliasing, or on a number of
    different backgrounds.

    ``method`` selects the matching strategy. Use the default unless you see
    too many false positives, in which case use 'correlation coefficient',
    which can also be more robust when the match is not exact.

    ``raw_tolerance`` is the proportion of the exact-match response that we
    still count as a match. N.B. lowering it increases the number of
    candidate tiles requiring normalisation, which dramatically slows down
    matching. ``normed_tolerance`` is how far a candidate's value may differ
    from one after normalisation. The defaults below come from a short
    investigation minimising missed matches as well as false positives,
    testing letters with different anti-aliasing on several backgrounds.

    :raises ValueError: if ``method`` is not a recognised name (previously an
        unknown method fell through and crashed with ``NameError``).
    """
    if method == 'correlation':
        raw_tolerance = raw_tolerance or 0.95
        normed_tolerance = normed_tolerance or 0.95
        results = np.array(match_via_correlation(
            image, template, raw_tolerance=raw_tolerance,
            normed_tolerance=normed_tolerance))
    elif method == 'correlation coefficient':
        raw_tolerance = raw_tolerance or 0.95
        normed_tolerance = normed_tolerance or 0.95
        results = np.array(match_via_correlation_coefficient(
            image, template, raw_tolerance=raw_tolerance,
            normed_tolerance=normed_tolerance))
    elif method == 'squared difference':
        raw_tolerance = raw_tolerance or 0.95
        normed_tolerance = normed_tolerance or 0.05
        results = np.array(match_via_squared_difference(
            image, template, raw_tolerance=raw_tolerance,
            sq_diff_tolerance=normed_tolerance))
    else:
        raise ValueError('unknown matching method: %r' % (method,))
    h, w = image.shape
    # keep only the (x, y) pair of each result record
    results = np.array([(result[0], result[1]) for result in results])
    # collapse each contiguous region of hits to its mean position
    results_aggregated_mean_match_position = match_positions((h, w), results)
    return results_aggregated_mean_match_position
## not what we want a all!!! only will take exact matches, defeating entire point
def post_process(image, template, list_of_coords):
h, w = template.shape
for x, y in list_of_coords:
print x-h + 1, y-w + 1
sub_image = image[x-h + 1:x + 1, y-w + 1:y + 1]
print sub_image.shape, template.shape, x, y
if not np.allclose(template, sub_image):
list_of_coords.remove((x,y))
return list_of_coords
def to_rgb(im):
return np.dstack([im.astype(np.uint8)] * 3).copy(order='C')
def highlight_matched_region_no_normalisation(image, template, method='correlation', raw_tolerance=0.666):
conv = fftconvolve(image, template[::-1,::-1])
th, tw = template.shape
r = find_potential_match_regions(template, conv, method=method, raw_tolerance=raw_tolerance)
r_in_image = [(r_x, r_y) for (r_x, r_y) in r if (r_x < image.shape[0] and r_y < image.shape[1])]
im_rgb = to_rgb(image)
for (x,y) in r_in_image:
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
def highlight_matched_region_normalised(image, shape, list_of_coords):
th, tw = shape
im_rgb = to_rgb(image)
for (x,y) in list_of_coords:
#print (x,y)
try:
im_rgb[x-th:x,y-tw:y] = 0, 100, 100
except IndexError:
im_rgb[x,y] = 0, 100, 100
return im_rgb
|
stxnext/mappet
|
mappet/helpers.py
|
no_empty_value
|
python
|
def no_empty_value(func):
    """Decorator: raise if the wrapped function's single argument is falsy."""
    @wraps(func)
    def inner(value):
        if value:
            return func(value)
        raise Exception("Empty value not allowed")
    return inner
|
Raises an exception if function argument is empty.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/helpers.py#L38-L45
| null |
# -*- coding: utf-8 -*-
u"""Helper functions.
.. :module: helpers
:synopsis: Helper functions.
"""
from collections import defaultdict
from decimal import Decimal
from functools import partial, wraps
import datetime
from lxml import etree
import dateutil.parser
__all__ = [
'to_bool',
'to_date',
'to_datetime',
'to_decimal',
'to_float',
'to_int',
'to_str',
'to_time',
'from_bool',
'from_date',
'from_datetime',
'from_time',
'CAST_DICT',
'normalize_tag',
'etree_to_dict',
'dict_to_etree',
]
def to_bool(value):
    """Converts human boolean-like values to Python boolean.

    Falls back to :class:`bool` when ``value`` is not recognized.

    :param value: the value to convert
    :returns: ``True`` if value is truthy, ``False`` otherwise
    :rtype: bool
    """
    truth_table = {
        '0': False, 'false': False, 'no': False,
        '1': True, 'true': True, 'yes': True,
    }
    key = value.lower() if isinstance(value, basestring) else value
    return truth_table.get(key, bool(value))
def to_str(value):
    u"""Represents values as unicode strings to support diacritics.

    NOTE: Python 2 only — relies on the ``unicode`` builtin.
    """
    return unicode(value)
def to_int(value):
    """Cast ``value`` to :class:`int`."""
    return int(value)
def to_float(value):
    """Cast ``value`` to :class:`float`."""
    return float(value)
def to_decimal(value):
    """Cast ``value`` to :class:`decimal.Decimal`."""
    return Decimal(value)
@no_empty_value
def to_time(value):
    """Parse a non-empty value into a :class:`datetime.time`.

    Raises (via :func:`no_empty_value`) when ``value`` is empty.
    """
    value = str(value)
    # dateutil.parse has problems parsing full hours without minutes,
    # so turn e.g. '12' into '12:00' before parsing
    sep = value[2:3]
    if not (sep == ':' or sep.isdigit()):
        value = value[:2] + ':00' + value[2:]
    return dateutil.parser.parse(value).time()
@no_empty_value
def to_datetime(value):
    """Parse a non-empty value into a :class:`datetime.datetime`."""
    return parse_datetime(value)
def parse_datetime(value):
    """Parse ``value`` (coerced to str) with :func:`dateutil.parser.parse`."""
    value = str(value)
    return dateutil.parser.parse(value)
@no_empty_value
def to_date(value):
    """Parse a non-empty value into a date.

    NOTE(review): this returns the full ``datetime`` from
    :func:`parse_datetime`, not a ``datetime.date`` — callers that need a
    plain date must call ``.date()`` themselves; confirm intent.
    """
    return parse_datetime(value)
def from_bool(value):
    """Serialise a boolean to 'YES'/'NO'.

    Non-boolean hashable values fall back to :func:`bool`; unhashable values
    (which break the dict lookup) yield ``False``.
    """
    mapping = {
        True: 'YES',
        False: 'NO',
    }
    try:
        return mapping.get(value, bool(value))
    except Exception:
        return False
def from_time(value):
    """Serialise a :class:`datetime.time` to its ISO-8601 string.

    :raises Exception: if ``value`` is not a ``datetime.time``.
    """
    if isinstance(value, datetime.time):
        return value.isoformat()
    raise Exception('Value {} is not datetime.time object'.format(value))
@no_empty_value
def from_datetime(value):
    """Serialise a ``datetime`` to ISO-8601 with seconds precision.

    Naive values are stamped with the local timezone.

    NOTE(review): only ``dateutil.parser`` is imported in this module —
    verify ``dateutil.tz`` is actually reachable on this name.
    """
    if not isinstance(value, datetime.datetime):
        raise Exception('Unexpected type {} of value {} (expected datetime.datetime)'.format(type(value), value))
    if value.tzinfo is None:
        value = value.replace(tzinfo=dateutil.tz.tzlocal())  # pragma: nocover
    return value.replace(microsecond=0).isoformat()
@no_empty_value
def from_date(value):
    """Serialise a ``date``/``datetime`` to its ISO-8601 string.

    :raises Exception: if ``value`` is neither ``datetime.date`` nor
        ``datetime.datetime``.
    """
    if not isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
        raise Exception('Not datetime.date object but {}: {}'.format(type(value), value))
    return value.isoformat()
# Maps a Python type to the function used to serialise values of that type
# back to text (see the from_* helpers above).
# NOTE: Python 2 only — references the ``unicode`` builtin.
CAST_DICT = {
    bool: from_bool,
    int: str,
    str: str,
    unicode: str,
    float: str,
    datetime.time: from_time,
    datetime.datetime: from_datetime,
    datetime.date: from_date,
}
def normalize_tag(tag):
    u"""Normalizes tag name.

    :param str tag: tag name to normalize
    :rtype: str
    :returns: normalized tag name

    >>> normalize_tag('tag-NaMe')
    'tag_name'
    """
    return '_'.join(tag.lower().split('-'))
def etree_to_dict(t, trim=True, **kw):
    u"""Converts an lxml.etree object to Python dict.

    >>> etree_to_dict(etree.Element('root'))
    {'root': None}

    :param etree.Element t: lxml tree to convert
    :param bool trim: strip surrounding whitespace from text nodes
    :returns d: a dict representing the lxml tree ``t``
    :rtype: dict
    """
    # leaf with attributes -> dict; bare leaf -> None placeholder
    d = {t.tag: {} if t.attrib else None}
    children = list(t)
    # recurse with the same keyword options applied at every level
    etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)
    if children:
        dd = defaultdict(list)
        d = {t.tag: {}}
        for dc in map(etree_to_dict_w_args, children):
            for k, v in dc.iteritems():
                # do not add Comment instance to the key
                if k is not etree.Comment:
                    dd[k].append(v)
        # single child -> scalar value, repeated children -> list
        d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}
    if t.attrib:
        # attributes are stored under '@'-prefixed keys
        d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
    if trim and t.text:
        t.text = t.text.strip()
    if t.text:
        if t.tag is etree.Comment and not kw.get('without_comments'):
            # adds a comments node
            d['#comments'] = t.text
        elif children or t.attrib:
            # text alongside children/attributes goes under '#text'
            d[t.tag]['#text'] = t.text
        else:
            d[t.tag] = t.text
    return d
def dict_to_etree(d, root):
    u"""Converts a dict to lxml.etree object.

    >>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root'))  # doctest: +ELLIPSIS
    <Element root at 0x...>

    :param dict d: dict representing the XML tree
    :param etree.Element root: XML node which will be assigned the resulting tree
    :returns: Textual representation of the XML tree
    :rtype: str
    """
    def _to_etree(d, node):
        # Recursively attach the structure described by ``d`` under ``node``.
        if d is None or len(d) == 0:
            return
        elif isinstance(d, basestring):
            # plain string -> text content of the current node
            node.text = d
        elif isinstance(d, dict):
            for k, v in d.items():
                assert isinstance(k, basestring)
                if k.startswith('#'):
                    # '#text' pseudo-key carries the node's text
                    assert k == '#text'
                    assert isinstance(v, basestring)
                    node.text = v
                elif k.startswith('@'):
                    # '@name' pseudo-keys carry attributes
                    assert isinstance(v, basestring)
                    node.set(k[1:], v)
                elif isinstance(v, list):
                    # No matter the child count, their parent will be the same.
                    sub_elem = etree.SubElement(node, k)
                    for child_num, e in enumerate(v):
                        if e is None:
                            if child_num == 0:
                                # Found the first occurrence of an empty child,
                                # skip creating of its XML repr, since it would be
                                # the same as ``sub_element`` higher up.
                                continue
                            # A list with None element means an empty child node
                            # in its parent, thus, recreating tags we have to go
                            # up one level.
                            # <node><child/></child></node> <=> {'node': 'child': [None, None]}
                            _to_etree(node, k)
                        else:
                            # If this isn't first child and it's a complex
                            # value (dict), we need to check if it's value
                            # is equivalent to None.
                            if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):
                                # At least one child was None, we have to create
                                # a new parent-node, which will not be empty.
                                sub_elem = etree.SubElement(node, k)
                            _to_etree(e, sub_elem)
                else:
                    _to_etree(v, etree.SubElement(node, k))
        elif etree.iselement(d):
            # Supports the case, when we got an empty child and want to recreate it.
            etree.SubElement(d, node)
        else:
            raise AttributeError('Argument is neither dict nor basestring.')
    _to_etree(d, root)
    return root
|
stxnext/mappet
|
mappet/helpers.py
|
to_bool
|
python
|
def to_bool(value):
cases = {
'0': False,
'false': False,
'no': False,
'1': True,
'true': True,
'yes': True,
}
value = value.lower() if isinstance(value, basestring) else value
return cases.get(value, bool(value))
|
Converts human boolean-like values to Python boolean.
Falls back to :class:`bool` when ``value`` is not recognized.
:param value: the value to convert
:returns: ``True`` if value is truthy, ``False`` otherwise
:rtype: bool
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/helpers.py#L48-L67
| null |
# -*- coding: utf-8 -*-
u"""Helper functions.
.. :module: helpers
:synopsis: Helper functions.
"""
from collections import defaultdict
from decimal import Decimal
from functools import partial, wraps
import datetime
from lxml import etree
import dateutil.parser
__all__ = [
'to_bool',
'to_date',
'to_datetime',
'to_decimal',
'to_float',
'to_int',
'to_str',
'to_time',
'from_bool',
'from_date',
'from_datetime',
'from_time',
'CAST_DICT',
'normalize_tag',
'etree_to_dict',
'dict_to_etree',
]
def no_empty_value(func):
"""Raises an exception if function argument is empty."""
@wraps(func)
def wrapper(value):
if not value:
raise Exception("Empty value not allowed")
return func(value)
return wrapper
def to_str(value):
u"""Represents values as unicode strings to support diacritics."""
return unicode(value)
def to_int(value):
return int(value)
def to_float(value):
return float(value)
def to_decimal(value):
return Decimal(value)
@no_empty_value
def to_time(value):
value = str(value)
# dateutil.parse has problems parsing full hours without minutes
sep = value[2:3]
if not (sep == ':' or sep.isdigit()):
value = value[:2] + ':00' + value[2:]
return dateutil.parser.parse(value).time()
@no_empty_value
def to_datetime(value):
return parse_datetime(value)
def parse_datetime(value):
value = str(value)
return dateutil.parser.parse(value)
@no_empty_value
def to_date(value):
return parse_datetime(value)
def from_bool(value):
cases = {
True: 'YES',
False: 'NO',
}
try:
return cases.get(value, bool(value))
except Exception:
return False
def from_time(value):
if not isinstance(value, datetime.time):
raise Exception('Value {} is not datetime.time object'.format(value))
return value.isoformat()
@no_empty_value
def from_datetime(value):
if not isinstance(value, datetime.datetime):
raise Exception('Unexpected type {} of value {} (expected datetime.datetime)'.format(type(value), value))
if value.tzinfo is None:
value = value.replace(tzinfo=dateutil.tz.tzlocal()) # pragma: nocover
return value.replace(microsecond=0).isoformat()
@no_empty_value
def from_date(value):
if not isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
raise Exception('Not datetime.date object but {}: {}'.format(type(value), value))
return value.isoformat()
CAST_DICT = {
bool: from_bool,
int: str,
str: str,
unicode: str,
float: str,
datetime.time: from_time,
datetime.datetime: from_datetime,
datetime.date: from_date,
}
def normalize_tag(tag):
u"""Normalizes tag name.
:param str tag: tag name to normalize
:rtype: str
:returns: normalized tag name
>>> normalize_tag('tag-NaMe')
'tag_name'
"""
return tag.lower().replace('-', '_')
def etree_to_dict(t, trim=True, **kw):
u"""Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)
if children:
dd = defaultdict(list)
d = {t.tag: {}}
for dc in map(etree_to_dict_w_args, children):
for k, v in dc.iteritems():
# do not add Comment instance to the key
if k is not etree.Comment:
dd[k].append(v)
d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if trim and t.text:
t.text = t.text.strip()
if t.text:
if t.tag is etree.Comment and not kw.get('without_comments'):
# adds a comments node
d['#comments'] = t.text
elif children or t.attrib:
d[t.tag]['#text'] = t.text
else:
d[t.tag] = t.text
return d
def dict_to_etree(d, root):
u"""Converts a dict to lxml.etree object.
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS
<Element root at 0x...>
:param dict d: dict representing the XML tree
:param etree.Element root: XML node which will be assigned the resulting tree
:returns: Textual representation of the XML tree
:rtype: str
"""
def _to_etree(d, node):
if d is None or len(d) == 0:
return
elif isinstance(d, basestring):
node.text = d
elif isinstance(d, dict):
for k, v in d.items():
assert isinstance(k, basestring)
if k.startswith('#'):
assert k == '#text'
assert isinstance(v, basestring)
node.text = v
elif k.startswith('@'):
assert isinstance(v, basestring)
node.set(k[1:], v)
elif isinstance(v, list):
# No matter the child count, their parent will be the same.
sub_elem = etree.SubElement(node, k)
for child_num, e in enumerate(v):
if e is None:
if child_num == 0:
# Found the first occurrence of an empty child,
# skip creating of its XML repr, since it would be
# the same as ``sub_element`` higher up.
continue
# A list with None element means an empty child node
# in its parent, thus, recreating tags we have to go
# up one level.
# <node><child/></child></node> <=> {'node': 'child': [None, None]}
_to_etree(node, k)
else:
# If this isn't first child and it's a complex
# value (dict), we need to check if it's value
# is equivalent to None.
if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):
# At least one child was None, we have to create
# a new parent-node, which will not be empty.
sub_elem = etree.SubElement(node, k)
_to_etree(e, sub_elem)
else:
_to_etree(v, etree.SubElement(node, k))
elif etree.iselement(d):
# Supports the case, when we got an empty child and want to recreate it.
etree.SubElement(d, node)
else:
raise AttributeError('Argument is neither dict nor basestring.')
_to_etree(d, root)
return root
|
stxnext/mappet
|
mappet/helpers.py
|
etree_to_dict
|
python
|
def etree_to_dict(t, trim=True, **kw):
u"""Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)
if children:
dd = defaultdict(list)
d = {t.tag: {}}
for dc in map(etree_to_dict_w_args, children):
for k, v in dc.iteritems():
# do not add Comment instance to the key
if k is not etree.Comment:
dd[k].append(v)
d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if trim and t.text:
t.text = t.text.strip()
if t.text:
if t.tag is etree.Comment and not kw.get('without_comments'):
# adds a comments node
d['#comments'] = t.text
elif children or t.attrib:
d[t.tag]['#text'] = t.text
else:
d[t.tag] = t.text
return d
|
u"""Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/helpers.py#L174-L212
| null |
# -*- coding: utf-8 -*-
u"""Helper functions.
.. :module: helpers
:synopsis: Helper functions.
"""
from collections import defaultdict
from decimal import Decimal
from functools import partial, wraps
import datetime
from lxml import etree
import dateutil.parser
__all__ = [
'to_bool',
'to_date',
'to_datetime',
'to_decimal',
'to_float',
'to_int',
'to_str',
'to_time',
'from_bool',
'from_date',
'from_datetime',
'from_time',
'CAST_DICT',
'normalize_tag',
'etree_to_dict',
'dict_to_etree',
]
def no_empty_value(func):
"""Raises an exception if function argument is empty."""
@wraps(func)
def wrapper(value):
if not value:
raise Exception("Empty value not allowed")
return func(value)
return wrapper
def to_bool(value):
"""Converts human boolean-like values to Python boolean.
Falls back to :class:`bool` when ``value`` is not recognized.
:param value: the value to convert
:returns: ``True`` if value is truthy, ``False`` otherwise
:rtype: bool
"""
cases = {
'0': False,
'false': False,
'no': False,
'1': True,
'true': True,
'yes': True,
}
value = value.lower() if isinstance(value, basestring) else value
return cases.get(value, bool(value))
def to_str(value):
u"""Represents values as unicode strings to support diacritics."""
return unicode(value)
def to_int(value):
return int(value)
def to_float(value):
return float(value)
def to_decimal(value):
return Decimal(value)
@no_empty_value
def to_time(value):
value = str(value)
# dateutil.parse has problems parsing full hours without minutes
sep = value[2:3]
if not (sep == ':' or sep.isdigit()):
value = value[:2] + ':00' + value[2:]
return dateutil.parser.parse(value).time()
@no_empty_value
def to_datetime(value):
return parse_datetime(value)
def parse_datetime(value):
value = str(value)
return dateutil.parser.parse(value)
@no_empty_value
def to_date(value):
return parse_datetime(value)
def from_bool(value):
cases = {
True: 'YES',
False: 'NO',
}
try:
return cases.get(value, bool(value))
except Exception:
return False
def from_time(value):
if not isinstance(value, datetime.time):
raise Exception('Value {} is not datetime.time object'.format(value))
return value.isoformat()
@no_empty_value
def from_datetime(value):
if not isinstance(value, datetime.datetime):
raise Exception('Unexpected type {} of value {} (expected datetime.datetime)'.format(type(value), value))
if value.tzinfo is None:
value = value.replace(tzinfo=dateutil.tz.tzlocal()) # pragma: nocover
return value.replace(microsecond=0).isoformat()
@no_empty_value
def from_date(value):
if not isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
raise Exception('Not datetime.date object but {}: {}'.format(type(value), value))
return value.isoformat()
CAST_DICT = {
bool: from_bool,
int: str,
str: str,
unicode: str,
float: str,
datetime.time: from_time,
datetime.datetime: from_datetime,
datetime.date: from_date,
}
def normalize_tag(tag):
u"""Normalizes tag name.
:param str tag: tag name to normalize
:rtype: str
:returns: normalized tag name
>>> normalize_tag('tag-NaMe')
'tag_name'
"""
return tag.lower().replace('-', '_')
def dict_to_etree(d, root):
u"""Converts a dict to lxml.etree object.
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS
<Element root at 0x...>
:param dict d: dict representing the XML tree
:param etree.Element root: XML node which will be assigned the resulting tree
:returns: Textual representation of the XML tree
:rtype: str
"""
def _to_etree(d, node):
if d is None or len(d) == 0:
return
elif isinstance(d, basestring):
node.text = d
elif isinstance(d, dict):
for k, v in d.items():
assert isinstance(k, basestring)
if k.startswith('#'):
assert k == '#text'
assert isinstance(v, basestring)
node.text = v
elif k.startswith('@'):
assert isinstance(v, basestring)
node.set(k[1:], v)
elif isinstance(v, list):
# No matter the child count, their parent will be the same.
sub_elem = etree.SubElement(node, k)
for child_num, e in enumerate(v):
if e is None:
if child_num == 0:
# Found the first occurrence of an empty child,
# skip creating of its XML repr, since it would be
# the same as ``sub_element`` higher up.
continue
# A list with None element means an empty child node
# in its parent, thus, recreating tags we have to go
# up one level.
# <node><child/></child></node> <=> {'node': 'child': [None, None]}
_to_etree(node, k)
else:
# If this isn't first child and it's a complex
# value (dict), we need to check if it's value
# is equivalent to None.
if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):
# At least one child was None, we have to create
# a new parent-node, which will not be empty.
sub_elem = etree.SubElement(node, k)
_to_etree(e, sub_elem)
else:
_to_etree(v, etree.SubElement(node, k))
elif etree.iselement(d):
# Supports the case, when we got an empty child and want to recreate it.
etree.SubElement(d, node)
else:
raise AttributeError('Argument is neither dict nor basestring.')
_to_etree(d, root)
return root
|
stxnext/mappet
|
mappet/helpers.py
|
dict_to_etree
|
python
|
def dict_to_etree(d, root):
u"""Converts a dict to lxml.etree object.
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS
<Element root at 0x...>
:param dict d: dict representing the XML tree
:param etree.Element root: XML node which will be assigned the resulting tree
:returns: Textual representation of the XML tree
:rtype: str
"""
def _to_etree(d, node):
if d is None or len(d) == 0:
return
elif isinstance(d, basestring):
node.text = d
elif isinstance(d, dict):
for k, v in d.items():
assert isinstance(k, basestring)
if k.startswith('#'):
assert k == '#text'
assert isinstance(v, basestring)
node.text = v
elif k.startswith('@'):
assert isinstance(v, basestring)
node.set(k[1:], v)
elif isinstance(v, list):
# No matter the child count, their parent will be the same.
sub_elem = etree.SubElement(node, k)
for child_num, e in enumerate(v):
if e is None:
if child_num == 0:
# Found the first occurrence of an empty child,
# skip creating of its XML repr, since it would be
# the same as ``sub_element`` higher up.
continue
# A list with None element means an empty child node
# in its parent, thus, recreating tags we have to go
# up one level.
# <node><child/></child></node> <=> {'node': 'child': [None, None]}
_to_etree(node, k)
else:
# If this isn't first child and it's a complex
# value (dict), we need to check if it's value
# is equivalent to None.
if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):
# At least one child was None, we have to create
# a new parent-node, which will not be empty.
sub_elem = etree.SubElement(node, k)
_to_etree(e, sub_elem)
else:
_to_etree(v, etree.SubElement(node, k))
elif etree.iselement(d):
# Supports the case, when we got an empty child and want to recreate it.
etree.SubElement(d, node)
else:
raise AttributeError('Argument is neither dict nor basestring.')
_to_etree(d, root)
return root
|
u"""Converts a dict to lxml.etree object.
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS
<Element root at 0x...>
:param dict d: dict representing the XML tree
:param etree.Element root: XML node which will be assigned the resulting tree
:returns: Textual representation of the XML tree
:rtype: str
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/helpers.py#L215-L275
|
[
"def _to_etree(d, node):\n if d is None or len(d) == 0:\n return\n elif isinstance(d, basestring):\n node.text = d\n elif isinstance(d, dict):\n for k, v in d.items():\n assert isinstance(k, basestring)\n if k.startswith('#'):\n assert k == '#text'\n assert isinstance(v, basestring)\n node.text = v\n elif k.startswith('@'):\n assert isinstance(v, basestring)\n node.set(k[1:], v)\n elif isinstance(v, list):\n # No matter the child count, their parent will be the same.\n sub_elem = etree.SubElement(node, k)\n\n for child_num, e in enumerate(v):\n if e is None:\n if child_num == 0:\n # Found the first occurrence of an empty child,\n # skip creating of its XML repr, since it would be\n # the same as ``sub_element`` higher up.\n continue\n # A list with None element means an empty child node\n # in its parent, thus, recreating tags we have to go\n # up one level.\n # <node><child/></child></node> <=> {'node': 'child': [None, None]}\n _to_etree(node, k)\n else:\n # If this isn't first child and it's a complex\n # value (dict), we need to check if it's value\n # is equivalent to None.\n if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):\n # At least one child was None, we have to create\n # a new parent-node, which will not be empty.\n sub_elem = etree.SubElement(node, k)\n _to_etree(e, sub_elem)\n else:\n _to_etree(v, etree.SubElement(node, k))\n elif etree.iselement(d):\n # Supports the case, when we got an empty child and want to recreate it.\n etree.SubElement(d, node)\n else:\n raise AttributeError('Argument is neither dict nor basestring.')\n"
] |
# -*- coding: utf-8 -*-
u"""Helper functions.
.. :module: helpers
:synopsis: Helper functions.
"""
from collections import defaultdict
from decimal import Decimal
from functools import partial, wraps
import datetime
from lxml import etree
import dateutil.parser
__all__ = [
'to_bool',
'to_date',
'to_datetime',
'to_decimal',
'to_float',
'to_int',
'to_str',
'to_time',
'from_bool',
'from_date',
'from_datetime',
'from_time',
'CAST_DICT',
'normalize_tag',
'etree_to_dict',
'dict_to_etree',
]
def no_empty_value(func):
"""Raises an exception if function argument is empty."""
@wraps(func)
def wrapper(value):
if not value:
raise Exception("Empty value not allowed")
return func(value)
return wrapper
def to_bool(value):
"""Converts human boolean-like values to Python boolean.
Falls back to :class:`bool` when ``value`` is not recognized.
:param value: the value to convert
:returns: ``True`` if value is truthy, ``False`` otherwise
:rtype: bool
"""
cases = {
'0': False,
'false': False,
'no': False,
'1': True,
'true': True,
'yes': True,
}
value = value.lower() if isinstance(value, basestring) else value
return cases.get(value, bool(value))
def to_str(value):
u"""Represents values as unicode strings to support diacritics."""
return unicode(value)
def to_int(value):
return int(value)
def to_float(value):
return float(value)
def to_decimal(value):
return Decimal(value)
@no_empty_value
def to_time(value):
value = str(value)
# dateutil.parse has problems parsing full hours without minutes
sep = value[2:3]
if not (sep == ':' or sep.isdigit()):
value = value[:2] + ':00' + value[2:]
return dateutil.parser.parse(value).time()
@no_empty_value
def to_datetime(value):
return parse_datetime(value)
def parse_datetime(value):
value = str(value)
return dateutil.parser.parse(value)
@no_empty_value
def to_date(value):
return parse_datetime(value)
def from_bool(value):
cases = {
True: 'YES',
False: 'NO',
}
try:
return cases.get(value, bool(value))
except Exception:
return False
def from_time(value):
if not isinstance(value, datetime.time):
raise Exception('Value {} is not datetime.time object'.format(value))
return value.isoformat()
@no_empty_value
def from_datetime(value):
if not isinstance(value, datetime.datetime):
raise Exception('Unexpected type {} of value {} (expected datetime.datetime)'.format(type(value), value))
if value.tzinfo is None:
value = value.replace(tzinfo=dateutil.tz.tzlocal()) # pragma: nocover
return value.replace(microsecond=0).isoformat()
@no_empty_value
def from_date(value):
if not isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
raise Exception('Not datetime.date object but {}: {}'.format(type(value), value))
return value.isoformat()
CAST_DICT = {
bool: from_bool,
int: str,
str: str,
unicode: str,
float: str,
datetime.time: from_time,
datetime.datetime: from_datetime,
datetime.date: from_date,
}
def normalize_tag(tag):
u"""Normalizes tag name.
:param str tag: tag name to normalize
:rtype: str
:returns: normalized tag name
>>> normalize_tag('tag-NaMe')
'tag_name'
"""
return tag.lower().replace('-', '_')
def etree_to_dict(t, trim=True, **kw):
u"""Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)
if children:
dd = defaultdict(list)
d = {t.tag: {}}
for dc in map(etree_to_dict_w_args, children):
for k, v in dc.iteritems():
# do not add Comment instance to the key
if k is not etree.Comment:
dd[k].append(v)
d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if trim and t.text:
t.text = t.text.strip()
if t.text:
if t.tag is etree.Comment and not kw.get('without_comments'):
# adds a comments node
d['#comments'] = t.text
elif children or t.attrib:
d[t.tag]['#text'] = t.text
else:
d[t.tag] = t.text
return d
|
stxnext/mappet
|
mappet/mappet.py
|
Node.getattr
|
python
|
def getattr(self, key, default=None, callback=None):
u"""Getting the attribute of an element.
>>> xml = etree.Element('root')
>>> xml.text = 'text'
>>> Node(xml).getattr('text')
'text'
>>> Node(xml).getattr('text', callback=str.upper)
'TEXT'
>>> Node(xml).getattr('wrong_attr', default='default')
'default'
"""
value = self._xml.text if key == 'text' else self._xml.get(key, default)
return callback(value) if callback else value
|
u"""Getting the attribute of an element.
>>> xml = etree.Element('root')
>>> xml.text = 'text'
>>> Node(xml).getattr('text')
'text'
>>> Node(xml).getattr('text', callback=str.upper)
'TEXT'
>>> Node(xml).getattr('wrong_attr', default='default')
'default'
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L75-L88
| null |
class Node(object):
u"""Base class representing an XML node."""
#: The lxml object representing parsed XML.
_xml = None
def __init__(self, xml):
self._xml = xml
def __repr__(self):
u"""Represent an XML node as a string with child count.
>>> xml = etree.Element('root')
>>> xml.set('attr1', 'val1')
>>> _ = etree.SubElement(xml, 'child')
>>> repr(Node(xml))
'<root attr1="val1"> (1)'
"""
return '<{tagname}{attributes}{closing_paren}> ({children})'.format(
tagname=self._xml.tag,
attributes=''.join(
[' {}="{}"'.format(
attr,
self._xml.attrib[attr]
) for attr in self._xml.attrib]
),
closing_paren='' if len(self._xml) else '/',
children=len(self._xml)
)
def __getitem__(self, key):
u"""Call to a list element.
Only calls to node attributes (i.e. starting with `@`) or
text nodes (starting with `#`) are allowed.
>>> xml = etree.Element('root')
>>> xml.set('attr1', 'val1')
>>> _ = etree.SubElement(xml, 'child')
>>> Node(xml)[0]
Traceback (most recent call last):
...
KeyError: 0
>>> Node(xml)['@attr1']
'val1'
"""
if self.is_key_attr_or_text(key):
return self.getattr(key[1:])
raise KeyError(key)
def setattr(self, key, value):
u"""Sets an attribute on a node.
>>> xml = etree.Element('root')
>>> Node(xml).setattr('text', 'text2')
>>> Node(xml).getattr('text')
'text2'
>>> Node(xml).setattr('attr', 'val')
>>> Node(xml).getattr('attr')
'val'
"""
if key == 'text':
self._xml.text = str(value)
else:
self._xml.set(key, str(value))
@property
def tag(self):
u"""Returns node's tag name."""
return self._xml.tag
@staticmethod
def is_key_attr_or_text(key):
return isinstance(key, basestring) and key.startswith(('@', '#'))
|
stxnext/mappet
|
mappet/mappet.py
|
Node.setattr
|
python
|
def setattr(self, key, value):
u"""Sets an attribute on a node.
>>> xml = etree.Element('root')
>>> Node(xml).setattr('text', 'text2')
>>> Node(xml).getattr('text')
'text2'
>>> Node(xml).setattr('attr', 'val')
>>> Node(xml).getattr('attr')
'val'
"""
if key == 'text':
self._xml.text = str(value)
else:
self._xml.set(key, str(value))
|
u"""Sets an attribute on a node.
>>> xml = etree.Element('root')
>>> Node(xml).setattr('text', 'text2')
>>> Node(xml).getattr('text')
'text2'
>>> Node(xml).setattr('attr', 'val')
>>> Node(xml).getattr('attr')
'val'
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L90-L104
| null |
class Node(object):
u"""Base class representing an XML node."""
#: The lxml object representing parsed XML.
_xml = None
def __init__(self, xml):
self._xml = xml
def __repr__(self):
u"""Represent an XML node as a string with child count.
>>> xml = etree.Element('root')
>>> xml.set('attr1', 'val1')
>>> _ = etree.SubElement(xml, 'child')
>>> repr(Node(xml))
'<root attr1="val1"> (1)'
"""
return '<{tagname}{attributes}{closing_paren}> ({children})'.format(
tagname=self._xml.tag,
attributes=''.join(
[' {}="{}"'.format(
attr,
self._xml.attrib[attr]
) for attr in self._xml.attrib]
),
closing_paren='' if len(self._xml) else '/',
children=len(self._xml)
)
def __getitem__(self, key):
u"""Call to a list element.
Only calls to node attributes (i.e. starting with `@`) or
text nodes (starting with `#`) are allowed.
>>> xml = etree.Element('root')
>>> xml.set('attr1', 'val1')
>>> _ = etree.SubElement(xml, 'child')
>>> Node(xml)[0]
Traceback (most recent call last):
...
KeyError: 0
>>> Node(xml)['@attr1']
'val1'
"""
if self.is_key_attr_or_text(key):
return self.getattr(key[1:])
raise KeyError(key)
def getattr(self, key, default=None, callback=None):
u"""Getting the attribute of an element.
>>> xml = etree.Element('root')
>>> xml.text = 'text'
>>> Node(xml).getattr('text')
'text'
>>> Node(xml).getattr('text', callback=str.upper)
'TEXT'
>>> Node(xml).getattr('wrong_attr', default='default')
'default'
"""
value = self._xml.text if key == 'text' else self._xml.get(key, default)
return callback(value) if callback else value
@property
def tag(self):
u"""Returns node's tag name."""
return self._xml.tag
@staticmethod
def is_key_attr_or_text(key):
return isinstance(key, basestring) and key.startswith(('@', '#'))
|
stxnext/mappet
|
mappet/mappet.py
|
Literal.get
|
python
|
def get(self, default=None, callback=None):
u"""Returns leaf's value."""
value = self._xml.text if self._xml.text else default
return callback(value) if callback else value
|
u"""Returns leaf's value.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L209-L212
| null |
class Literal(Node):
u"""Represents a leaf in an XML tree."""
def __str__(self):
u"""Represents a leaf as a str.
Returns node's text as a string, if it's not None."""
return str(self.get())
#: Represents the leaf as unicode.
__unicode__ = __str__
def __repr__(self):
u"""Represents the leaf as its textual value."""
return str(self)
def __int__(self):
u"""Represents the literal as an int."""
return self.to_int()
def __float__(self):
u"""Represents the literal as an float."""
return self.to_float()
def __nonzero__(self):
u"""Represents the literal as an bool."""
return True if self._xml.text else False
def __eq__(self, other):
u"""Compares two leafs.
Assumes they are equal if the same are their:
* tagname,
* parent,
* text,
* attributes,
* position among parent's children.
"""
self_parent = self._xml.getparent()
other_parent = other._xml.getparent()
is_same_tag = self._xml.tag == other._xml.tag
is_same_parent = self_parent == other_parent
is_same_text = str(self) == str(other)
are_attrs_equal = (self._xml.attrib == other._xml.attrib)
is_same_position = self_parent.index(self._xml) == other_parent.index(other._xml)
return all((
is_same_tag,
is_same_parent,
is_same_text,
are_attrs_equal,
is_same_position,
))
def __hash__(self):
return hash(self._xml)
def __len__(self):
u"""Returns the length of node's text."""
return len(self._xml.text)
@staticmethod
def __dir__():
u"""Lists available casting methods."""
return sorted(set([fnc for fnc in helpers.__all__ if fnc.startswith('to_')]))
def __getattr__(self, name):
u"""Returns a function for converting node's value.
A leaf has no children, thus accessing its attributes returns a function.
"""
if name.startswith('to_') and name in dir(helpers):
fn = getattr(helpers, name)
return lambda: fn(self._xml.text)
raise AttributeError(name)
def __setitem__(self, key, value):
u"""Attribute assignment by dict access.
Extending the leaf in this case is not possible, since a string is returned.
"""
if self.is_key_attr_or_text(key):
self.setattr(key[1:], value)
def __add__(self, other):
u"""String concatenation."""
return self.to_str() + str(other)
def __radd__(self, other):
u"""Reverse string concatenation."""
return str(other) + self.to_str()
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.to_str
|
python
|
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
|
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L384-L406
| null |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.iter_children
|
python
|
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
|
u"""Iterates over children.
:param key: A key for filtering children by tagname.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L412-L429
|
[
"def _get_aliases(self):\n u\"\"\"Creates a dict with aliases.\n\n The key is a normalized tagname, value the original tagname.\n \"\"\"\n if self._aliases is None:\n self._aliases = {}\n\n if self._xml is not None:\n for child in self._xml.iterchildren():\n self._aliases[helpers.normalize_tag(child.tag)] = child.tag\n\n return self._aliases\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.update
|
python
|
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
|
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L438-L455
|
[
"def _get_aliases(self):\n u\"\"\"Creates a dict with aliases.\n\n The key is a normalized tagname, value the original tagname.\n \"\"\"\n if self._aliases is None:\n self._aliases = {}\n\n if self._xml is not None:\n for child in self._xml.iterchildren():\n self._aliases[helpers.normalize_tag(child.tag)] = child.tag\n\n return self._aliases\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.sget
|
python
|
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
|
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L457-L507
|
[
"def getattr(self, key, default=None, callback=None):\n u\"\"\"Getting the attribute of an element.\n\n >>> xml = etree.Element('root')\n >>> xml.text = 'text'\n >>> Node(xml).getattr('text')\n 'text'\n >>> Node(xml).getattr('text', callback=str.upper)\n 'TEXT'\n >>> Node(xml).getattr('wrong_attr', default='default')\n 'default'\n \"\"\"\n value = self._xml.text if key == 'text' else self._xml.get(key, default)\n return callback(value) if callback else value\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.create
|
python
|
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
|
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L509-L523
|
[
"def set(self, name, value):\n u\"\"\"Assigns a new XML structure to the node.\n\n A literal value, dict or list can be passed in. Works for all nested levels.\n\n Dictionary:\n >>> m = Mappet('<root/>')\n >>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}\n >>> m.head.to_str()\n '<head><a>A</a><b attr=\"val\">B</b></head>'\n\n List:\n >>> m.head = [{'a': i} for i in 'ABC']\n >>> m.head.to_str()\n '<head><a>A</a><a>B</a><a>C</a></head>'\n\n Literals:\n >>> m.head.leaf = 'A'\n >>> m.head.leaf.get()\n 'A'\n \"\"\"\n try:\n # Searches for a node to assign to.\n element = next(self._xml.iterchildren(tag=name))\n except StopIteration:\n # There is no such node in the XML tree. We create a new one\n # with current root as parent (self._xml).\n element = etree.SubElement(self._xml, name)\n\n if isinstance(value, dict):\n self.assign_dict(element, value)\n elif isinstance(value, (list, tuple, set)):\n self.assign_sequence_or_set(element, value)\n else:\n # Literal value.\n self.assign_literal(element, value)\n\n # Clear the aliases.\n self._aliases = None\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.set
|
python
|
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
|
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L525-L563
|
[
"def assign_dict(self, node, xml_dict):\n \"\"\"Assigns a Python dict to a ``lxml`` node.\n\n :param node: A node to assign the dict to.\n :param xml_dict: The dict with attributes/children to use.\n \"\"\"\n new_node = etree.Element(node.tag)\n\n # Replaces the previous node with the new one\n self._xml.replace(node, new_node)\n\n # Copies #text and @attrs from the xml_dict\n helpers.dict_to_etree(xml_dict, new_node)\n",
"def assign_sequence_or_set(element, value):\n element.clear()\n\n for item in value:\n temp_element = etree.Element('temp')\n helpers.dict_to_etree(item, temp_element)\n for child in temp_element.iterchildren():\n element.append(child)\n del temp_element\n",
"def assign_literal(element, value):\n u\"\"\"Assigns a literal.\n\n If a given node doesn't exist, it will be created.\n\n :param etree.Element element: element to which we assign.\n :param value: the value to assign\n \"\"\"\n # Searches for a conversion method specific to the type of value.\n helper = helpers.CAST_DICT.get(type(value), str)\n\n # Removes all children and attributes.\n element.clear()\n element.text = helper(value)\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.assign_dict
|
python
|
def assign_dict(self, node, xml_dict):
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
|
Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L565-L577
|
[
"def dict_to_etree(d, root):\n u\"\"\"Converts a dict to lxml.etree object.\n\n >>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS\n <Element root at 0x...>\n\n :param dict d: dict representing the XML tree\n :param etree.Element root: XML node which will be assigned the resulting tree\n :returns: Textual representation of the XML tree\n :rtype: str\n \"\"\"\n def _to_etree(d, node):\n if d is None or len(d) == 0:\n return\n elif isinstance(d, basestring):\n node.text = d\n elif isinstance(d, dict):\n for k, v in d.items():\n assert isinstance(k, basestring)\n if k.startswith('#'):\n assert k == '#text'\n assert isinstance(v, basestring)\n node.text = v\n elif k.startswith('@'):\n assert isinstance(v, basestring)\n node.set(k[1:], v)\n elif isinstance(v, list):\n # No matter the child count, their parent will be the same.\n sub_elem = etree.SubElement(node, k)\n\n for child_num, e in enumerate(v):\n if e is None:\n if child_num == 0:\n # Found the first occurrence of an empty child,\n # skip creating of its XML repr, since it would be\n # the same as ``sub_element`` higher up.\n continue\n # A list with None element means an empty child node\n # in its parent, thus, recreating tags we have to go\n # up one level.\n # <node><child/></child></node> <=> {'node': 'child': [None, None]}\n _to_etree(node, k)\n else:\n # If this isn't first child and it's a complex\n # value (dict), we need to check if it's value\n # is equivalent to None.\n if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):\n # At least one child was None, we have to create\n # a new parent-node, which will not be empty.\n sub_elem = etree.SubElement(node, k)\n _to_etree(e, sub_elem)\n else:\n _to_etree(v, etree.SubElement(node, k))\n elif etree.iselement(d):\n # Supports the case, when we got an empty child and want to recreate it.\n etree.SubElement(d, node)\n else:\n raise AttributeError('Argument is neither dict nor 
basestring.')\n\n _to_etree(d, root)\n return root\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.assign_literal
|
python
|
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
|
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L591-L604
| null |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.to_dict
|
python
|
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
|
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L606-L613
|
[
"def etree_to_dict(t, trim=True, **kw):\n u\"\"\"Converts an lxml.etree object to Python dict.\n\n >>> etree_to_dict(etree.Element('root'))\n {'root': None}\n\n :param etree.Element t: lxml tree to convert\n :returns d: a dict representing the lxml tree ``t``\n :rtype: dict\n \"\"\"\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)\n\n if children:\n dd = defaultdict(list)\n d = {t.tag: {}}\n\n for dc in map(etree_to_dict_w_args, children):\n for k, v in dc.iteritems():\n # do not add Comment instance to the key\n if k is not etree.Comment:\n dd[k].append(v)\n\n d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}\n\n if t.attrib:\n d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())\n if trim and t.text:\n t.text = t.text.strip()\n if t.text:\n if t.tag is etree.Comment and not kw.get('without_comments'):\n # adds a comments node\n d['#comments'] = t.text\n elif children or t.attrib:\n d[t.tag]['#text'] = t.text\n else:\n d[t.tag] = t.text\n return d\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet._get_aliases
|
python
|
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
|
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L615-L627
|
[
"def normalize_tag(tag):\n u\"\"\"Normalizes tag name.\n\n :param str tag: tag name to normalize\n :rtype: str\n :returns: normalized tag name\n\n >>> normalize_tag('tag-NaMe')\n 'tag_name'\n \"\"\"\n return tag.lower().replace('-', '_')\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.xpath
|
python
|
def xpath(
self,
path,
namespaces=None,
regexp=False,
smart_strings=True,
single_use=False,
):
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
"""
if (
namespaces in ['exslt', 're'] or
(regexp and not namespaces)
):
namespaces = {'re': "http://exslt.org/regular-expressions"}
if single_use:
node = self._xml.xpath(path)
else:
xpe = self.xpath_evaluator(
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
node = xpe(path)
if len(node) == 1:
node = node[0]
if len(node):
return self.__class__(node)
else:
return Literal(node)
return node
|
u"""Executes XPath query on the ``lxml`` object and returns a correct object.
:param str path: XPath string e.g., 'cars'/'car'
:param str/dict namespaces: e.g., 'exslt', 're' or
``{'re': "http://exslt.org/regular-expressions"}``
:param bool regexp: if ``True`` and no namespaces is provided, it will use
``exslt`` namespace
:param bool smart_strings:
:param bool single_use: faster method for using only once. Does not
create ``XPathEvaluator`` instance.
>>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
>>> root.XPath(
"//*[re:test(., '^abc$', 'i')]",
namespaces='exslt',
regexp=True,
)
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L629-L677
|
[
"def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):\n u\"\"\"Creates an XPathEvaluator instance for an ElementTree or an Element.\n\n :returns: ``XPathEvaluator`` instance\n \"\"\"\n return etree.XPathEvaluator(\n self._xml,\n namespaces=namespaces,\n regexp=regexp,\n smart_strings=smart_strings\n )\n"
] |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
u"""Returns node's children.
:param key: A key for filtering children by tagname.
"""
return list(self.iter_children(key))
def update(self, **kwargs):
u"""Updating or creation of new simple nodes.
Each dict key is used as a tagname and value as text.
"""
for key, value in kwargs.items():
helper = helpers.CAST_DICT.get(type(value), str)
tag = self._get_aliases().get(key, key)
elements = list(self._xml.iterchildren(tag=tag))
if elements:
for element in elements:
element.text = helper(value)
else:
element = etree.Element(key)
element.text = helper(value)
self._xml.append(element)
self._aliases = None
def sget(self, path, default=NONE_NODE):
u"""Enables access to nodes if one or more of them don't exist.
Example:
>>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
>>> m.sget('tag')
text value
>>> m.sget('tag.@attr1')
'attr text'
>>> m.sget('tag.#text')
'text value'
>>> m.sget('reply.vms_model_cars.car.0.params.doors')
NONE_NODE
Accessing nonexistent path returns None-like object with mocked
converting functions which returns None:
>>> m.sget('reply.fake_node').to_dict() is None
True
"""
attrs = str(path).split(".")
text_or_attr = None
last_attr = attrs[-1]
# Case of getting text or attribute
if last_attr == '#text' or last_attr.startswith('@'):
# #text => text, @attr => attr
text_or_attr = last_attr[1:]
attrs = attrs[:-1]
# When getting #text and @attr we want default value to be None.
if default is NONE_NODE:
default = None
my_object = self
for attr in attrs:
try:
if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
my_object_next = my_object[int(attr)]
else:
my_object_next = getattr(my_object, attr)
my_object = my_object_next
except (AttributeError, KeyError, IndexError):
return default
# Return #text or @attr
if text_or_attr:
try:
return my_object.getattr(text_or_attr)
except AttributeError:
# myObject can be a list.
return None
else:
return my_object
def create(self, tag, value):
u"""Creates a node, if it doesn't exist yet.
Unlike attribute access, this allows to pass a node's name with hyphens.
Those hyphens will be normalized automatically.
In case the required element already exists, raises an exception.
Updating/overwriting should be done using `update``.
"""
child_tags = {child.tag for child in self._xml}
if tag in child_tags:
raise KeyError('Node {} already exists in XML tree.'.format(tag))
self.set(tag, value)
def set(self, name, value):
u"""Assigns a new XML structure to the node.
A literal value, dict or list can be passed in. Works for all nested levels.
Dictionary:
>>> m = Mappet('<root/>')
>>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
>>> m.head.to_str()
'<head><a>A</a><b attr="val">B</b></head>'
List:
>>> m.head = [{'a': i} for i in 'ABC']
>>> m.head.to_str()
'<head><a>A</a><a>B</a><a>C</a></head>'
Literals:
>>> m.head.leaf = 'A'
>>> m.head.leaf.get()
'A'
"""
try:
# Searches for a node to assign to.
element = next(self._xml.iterchildren(tag=name))
except StopIteration:
# There is no such node in the XML tree. We create a new one
# with current root as parent (self._xml).
element = etree.SubElement(self._xml, name)
if isinstance(value, dict):
self.assign_dict(element, value)
elif isinstance(value, (list, tuple, set)):
self.assign_sequence_or_set(element, value)
else:
# Literal value.
self.assign_literal(element, value)
# Clear the aliases.
self._aliases = None
def assign_dict(self, node, xml_dict):
"""Assigns a Python dict to a ``lxml`` node.
:param node: A node to assign the dict to.
:param xml_dict: The dict with attributes/children to use.
"""
new_node = etree.Element(node.tag)
# Replaces the previous node with the new one
self._xml.replace(node, new_node)
# Copies #text and @attrs from the xml_dict
helpers.dict_to_etree(xml_dict, new_node)
@staticmethod
def assign_sequence_or_set(element, value):
element.clear()
for item in value:
temp_element = etree.Element('temp')
helpers.dict_to_etree(item, temp_element)
for child in temp_element.iterchildren():
element.append(child)
del temp_element
@staticmethod
def assign_literal(element, value):
u"""Assigns a literal.
If a given node doesn't exist, it will be created.
:param etree.Element element: element to which we assign.
:param value: the value to assign
"""
# Searches for a conversion method specific to the type of value.
helper = helpers.CAST_DICT.get(type(value), str)
# Removes all children and attributes.
element.clear()
element.text = helper(value)
def to_dict(self, **kw):
u"""Converts the lxml object to a dict.
possible kwargs:
without_comments: bool
"""
_, value = helpers.etree_to_dict(self._xml, **kw).popitem()
return value
def _get_aliases(self):
u"""Creates a dict with aliases.
The key is a normalized tagname, value the original tagname.
"""
if self._aliases is None:
self._aliases = {}
if self._xml is not None:
for child in self._xml.iterchildren():
self._aliases[helpers.normalize_tag(child.tag)] = child.tag
return self._aliases
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
def keys(self):
"""Returns a set of node's keys."""
return set(self._get_aliases().keys())
|
stxnext/mappet
|
mappet/mappet.py
|
Mappet.xpath_evaluator
|
python
|
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
"""
return etree.XPathEvaluator(
self._xml,
namespaces=namespaces,
regexp=regexp,
smart_strings=smart_strings
)
|
u"""Creates an XPathEvaluator instance for an ElementTree or an Element.
:returns: ``XPathEvaluator`` instance
|
train
|
https://github.com/stxnext/mappet/blob/ac7468ac28ed82e45065b1e348cf865c8f73f0db/mappet/mappet.py#L679-L689
| null |
class Mappet(Node):
u"""A node that may have children."""
_aliases = None
u"""Dictionary with node aliases.
The keys are normalized tagnames, values are the original tagnames.
_aliases = {
'car_model_desc': 'car-model-desc',
'car': 'Car',
}
"""
def __init__(self, xml):
u"""Creates the mappet object from either lxml object, a string or a dict.
If you pass a dict without root element, one will be created for you with
'root' as tag name.
>>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
'<a attr1="val1">list_elem_1</a>'
>>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
'<root attr1="val1">list_elem_1</root>'
"""
if etree.iselement(xml):
self._xml = xml
elif isinstance(xml, basestring):
self._xml = etree.fromstring(xml)
elif isinstance(xml, dict):
if len(xml) == 1:
root_name = xml.keys()[0]
body = xml[root_name]
else:
root_name = 'root'
body = xml
self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
else:
raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __nonzero__(self):
u"""Checks if this node has children, otherwise returns False."""
return self.has_children()
def __len__(self):
u"""Returns the children count."""
return len(self._xml)
def __dir__(self):
u"""Returns a list of children and available helper methods."""
return sorted(self.keys() | {m for m in dir(self.__class__) if m.startswith('to_')})
def __deepcopy__(self, memodict):
u"""Performs a deepcopy on the underlying XML tree."""
return self.__class__(deepcopy(self._xml))
def __getattr__(self, name):
u"""Attribute access.
Returns a list o children, if there is more than 1.
Returns a child, if there is exactly 1.
"""
children = self.children(name)
if len(children) > 1:
return children
elif len(children) == 1:
return children[0]
def __setattr__(self, name, value):
u"""Node attribute assignment.
Calls ``set`` in the end.
"""
# Only elements that aren't a part of class definition are overwritten.
if name not in dir(self.__class__):
return self.set(name, value)
return super(Mappet, self).__setattr__(name, value)
def __delattr__(self, key):
u"""Node removal."""
# Searches among aliases, if none is found returns the original key.
tag = self._get_aliases().get(key, key)
self.__delitem__(tag)
def __getitem__(self, key):
u"""Dictionary access."""
# Checks if the call isn't to an attribute.
if isinstance(key, basestring) and not key.startswith('@'):
children = self.children(key)
if len(children) == 1:
children = children[0]
# Return the value if it's a leaf.
if isinstance(children, Literal):
return children.get()
return children
return super(Mappet, self).__getitem__(key)
def __delitem__(self, key):
u"""Removes all children with a given key."""
# Checks if name is not a part of class definition.
if key not in dir(self.__class__):
for child in self._xml.iterchildren(tag=key):
self._xml.remove(child)
def __eq__(self, other):
u"""Compares mappet objects.
Two mappet objects are deemed equal if the lxmls object they represent are equal.
"""
return etree.tostring(self._xml) == etree.tostring(other._xml)
def __contains__(self, path):
u"""Check if object contains given path."""
elem = self.sget(path)
return not (elem is None or elem is NONE_NODE)
def __getstate__(self):
u"""Converts the lxml to string for Pickling."""
return {
'_xml': etree.tostring(self._xml, pretty_print=False)
}
def __setstate__(self, dict_):
u"""Restores a Pickled mappet object."""
self._xml = etree.fromstring(dict_['_xml'])
def __iter__(self):
u"""Returns children as an iterator."""
return self.iter_children()
def to_str(self, pretty_print=False, encoding=None, **kw):
u"""Converts a node with all of it's children to a string.
Remaining arguments are passed to etree.tostring as is.
kwarg without_comments: bool because it works only in C14N flags:
'pretty print' and 'encoding' are ignored.
:param bool pretty_print: whether to format the output
:param str encoding: which encoding to use (ASCII by default)
:rtype: str
:returns: node's representation as a string
"""
if kw.get('without_comments') and not kw.get('method'):
kw.pop('without_comments')
kw['method'] = 'c14n'
kw['with_comments'] = False
return etree.tostring(
self._xml,
pretty_print=pretty_print,
encoding=encoding,
**kw
)
def has_children(self):
u"""Returns true if a node has children."""
return bool(len(self))
def iter_children(self, key=None):
u"""Iterates over children.
:param key: A key for filtering children by tagname.
"""
tag = None
if key:
tag = self._get_aliases().get(key)
if not tag:
raise KeyError(key)
for child in self._xml.iterchildren(tag=tag):
if len(child):
yield self.__class__(child)
else:
yield Literal(child)
def children(self, key=None):
    u"""Returns node's children.

    :param key: A key for filtering children by tagname.
    """
    # Materializes the iter_children() generator into a list.
    return list(self.iter_children(key))
def update(self, **kwargs):
    u"""Updates existing simple nodes or creates new ones.

    Each keyword is used as a tag name (normalized names are resolved
    through the alias map) and its value becomes the node text.
    """
    for key, value in kwargs.items():
        to_text = helpers.CAST_DICT.get(type(value), str)
        tag = self._get_aliases().get(key, key)
        matched = list(self._xml.iterchildren(tag=tag))
        if matched:
            for node in matched:
                node.text = to_text(value)
        else:
            # No such child yet -- create it with the raw keyword as tag.
            node = etree.Element(key)
            node.text = to_text(value)
            self._xml.append(node)
    # The tag set may have changed; force the alias map to be rebuilt.
    self._aliases = None
def sget(self, path, default=NONE_NODE):
    u"""Enables access to nodes if one or more of them don't exist.

    Example:
    >>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>')
    >>> m.sget('tag')
    text value
    >>> m.sget('tag.@attr1')
    'attr text'
    >>> m.sget('tag.#text')
    'text value'
    >>> m.sget('reply.vms_model_cars.car.0.params.doors')
    NONE_NODE

    Accessing nonexistent path returns None-like object with mocked
    converting functions which returns None:
    >>> m.sget('reply.fake_node').to_dict() is None
    True
    """
    # Dotted path: each segment is an attribute name, or a (possibly
    # negative) integer index when the current object is a sequence.
    attrs = str(path).split(".")
    text_or_attr = None
    last_attr = attrs[-1]
    # Case of getting text or attribute
    if last_attr == '#text' or last_attr.startswith('@'):
        # #text => text, @attr => attr
        text_or_attr = last_attr[1:]
        attrs = attrs[:-1]
        # When getting #text and @attr we want default value to be None.
        if default is NONE_NODE:
            default = None
    my_object = self
    for attr in attrs:
        try:
            if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
                my_object_next = my_object[int(attr)]
            else:
                # NOTE(review): relies on the class's attribute access to
                # resolve child nodes -- any failure falls back to default.
                my_object_next = getattr(my_object, attr)
            my_object = my_object_next
        except (AttributeError, KeyError, IndexError):
            return default
    # Return #text or @attr
    if text_or_attr:
        try:
            # ``getattr`` here is the node's own accessor method, not the
            # builtin -- presumably returns text/attribute values.
            return my_object.getattr(text_or_attr)
        except AttributeError:
            # myObject can be a list.
            return None
    else:
        return my_object
def create(self, tag, value):
    u"""Creates a node, failing when one with the same tag already exists.

    Unlike attribute access, a tag name containing hyphens can be passed
    directly; it will be normalized automatically.  Overwriting an
    existing node should be done with ``update`` instead.

    :raises KeyError: when the tag is already present in the tree.
    """
    existing_tags = {child.tag for child in self._xml}
    if tag in existing_tags:
        raise KeyError('Node {} already exists in XML tree.'.format(tag))
    self.set(tag, value)
def set(self, name, value):
    u"""Assigns a new XML structure to the node.

    A literal value, dict or list can be passed in. Works for all
    nested levels.

    Dictionary:
    >>> m = Mappet('<root/>')
    >>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}}
    >>> m.head.to_str()
    '<head><a>A</a><b attr="val">B</b></head>'

    List:
    >>> m.head = [{'a': i} for i in 'ABC']
    >>> m.head.to_str()
    '<head><a>A</a><a>B</a><a>C</a></head>'

    Literals:
    >>> m.head.leaf = 'A'
    >>> m.head.leaf.get()
    'A'
    """
    # Reuse the first existing child with this tag, or create one under
    # the current root when none exists.
    for element in self._xml.iterchildren(tag=name):
        break
    else:
        element = etree.SubElement(self._xml, name)
    if isinstance(value, dict):
        self.assign_dict(element, value)
    elif isinstance(value, (list, tuple, set)):
        self.assign_sequence_or_set(element, value)
    else:
        # Literal value.
        self.assign_literal(element, value)
    # Clear the aliases.
    self._aliases = None
def assign_dict(self, node, xml_dict):
    """Replaces ``node`` with a fresh element built from a Python dict.

    :param node: A node to assign the dict to.
    :param xml_dict: The dict with attributes/children to use.
    """
    replacement = etree.Element(node.tag)
    # Swap the old node for the empty replacement, then fill it in
    # (#text and @attrs come from the dict).
    self._xml.replace(node, replacement)
    helpers.dict_to_etree(xml_dict, replacement)
@staticmethod
def assign_sequence_or_set(element, value):
    """Rebuilds ``element``'s children from an iterable of dict items."""
    element.clear()
    for item in value:
        # Build each item under a scratch element, then move its
        # children across (moving reparents the nodes in lxml).
        scratch = etree.Element('temp')
        helpers.dict_to_etree(item, scratch)
        for child in scratch.iterchildren():
            element.append(child)
        del scratch
@staticmethod
def assign_literal(element, value):
    u"""Stores a literal value as the element's text.

    All existing children and attributes are removed first.

    :param etree.Element element: element to which we assign.
    :param value: the value to assign
    """
    # Conversion method specific to the value's type (str fallback).
    to_text = helpers.CAST_DICT.get(type(value), str)
    element.clear()
    element.text = to_text(value)
def to_dict(self, **kw):
    u"""Converts the lxml object to a dict.

    possible kwargs:
        without_comments: bool
    """
    # etree_to_dict returns a single-entry {root_tag: value} mapping;
    # unwrap it and hand back just the value.
    converted = helpers.etree_to_dict(self._xml, **kw)
    _, value = converted.popitem()
    return value
def _get_aliases(self):
    u"""Lazily builds a map of normalized tag name -> original tag name.

    The result is cached on ``self._aliases`` until invalidated.
    """
    if self._aliases is None:
        aliases = {}
        if self._xml is not None:
            for child in self._xml.iterchildren():
                aliases[helpers.normalize_tag(child.tag)] = child.tag
        self._aliases = aliases
    return self._aliases
def xpath(
    self,
    path,
    namespaces=None,
    regexp=False,
    smart_strings=True,
    single_use=False,
):
    u"""Executes XPath query on the ``lxml`` object and returns a correct object.

    :param str path: XPath string e.g., 'cars'/'car'
    :param str/dict namespaces: e.g., 'exslt', 're' or
        ``{'re': "http://exslt.org/regular-expressions"}``
    :param bool regexp: if ``True`` and no namespaces is provided, it will use
        ``exslt`` namespace
    :param bool smart_strings:
    :param bool single_use: faster method for using only once. Does not
        create ``XPathEvaluator`` instance.

    >>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>")
    >>> root.XPath(
        "//*[re:test(., '^abc$', 'i')]",
        namespaces='exslt',
        regexp=True,
    )
    """
    # Shorthand namespace names expand to the EXSLT regex namespace.
    if (
        namespaces in ['exslt', 're'] or
        (regexp and not namespaces)
    ):
        namespaces = {'re': "http://exslt.org/regular-expressions"}
    if single_use:
        # One-shot evaluation straight on the tree.
        node = self._xml.xpath(path)
    else:
        xpe = self.xpath_evaluator(
            namespaces=namespaces,
            regexp=regexp,
            smart_strings=smart_strings
        )
        node = xpe(path)
    # A single-element result is unwrapped and re-wrapped in this class
    # (or Literal for leaves); multi-element results pass through as-is.
    if len(node) == 1:
        node = node[0]
        if len(node):
            return self.__class__(node)
        else:
            return Literal(node)
    return node
def keys(self):
    """Returns a set of the node's keys (normalized child tag names)."""
    return set(self._get_aliases().keys())
|
hackedd/gw2api
|
gw2api/__init__.py
|
set_cache_dir
|
python
|
def set_cache_dir(directory):
    """Set the directory to cache JSON responses from most API endpoints.

    Passing ``None`` disables caching.  The directory is created when it
    does not exist yet.

    :raises ValueError: when the path exists but is not a directory.
    """
    global cache_dir
    if directory is None:
        cache_dir = None
        return
    if not os.path.exists(directory):
        os.makedirs(directory)
    if os.path.isdir(directory):
        cache_dir = directory
    else:
        raise ValueError("not a directory")
|
Set the directory to cache JSON responses from most API endpoints.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/__init__.py#L38-L51
| null |
import os
import requests
# API version and the base URL that all v1 endpoints are rooted at.
VERSION = "v1"
BASE_URL = "https://api.guildwars2.com/%s/" % VERSION
# Languages the API can localize text into.
LANGUAGES = {"en": "English", "es": "Spanish", "de": "German", "fr": "French"}
# Numeric chat-link type identifiers.
TYPE_COIN, TYPE_ITEM, TYPE_TEXT, TYPE_MAP = 1, 2, 3, 4
TYPE_PVP_GAME, TYPE_SKILL, TYPE_TRAIT, TYPE_USER = 5, 6, 7, 8
TYPE_RECIPE, TYPE_SKIN, TYPE_OUTFIT, TYPE_OBJECTIVE = 9, 10, 11, 12
# Human-readable link-type names mapped to their numeric ids.
LINK_TYPES = {
    "coin": TYPE_COIN,
    "item": TYPE_ITEM,
    "text": TYPE_TEXT,
    "map": TYPE_MAP,
    "pvp_game": TYPE_PVP_GAME,
    "skill": TYPE_SKILL,
    "trait": TYPE_TRAIT,
    "user": TYPE_USER,
    "recipe": TYPE_RECIPE,
    "skin": TYPE_SKIN,
    "outfit": TYPE_OUTFIT,
    "objective": TYPE_OBJECTIVE,
}
# Shared requests.Session for all HTTP calls; replace via set_session().
session = requests.Session()
# Cache settings: directory (None disables caching) and max age in seconds.
cache_dir = None
cache_time = 14 * 24 * 3600
def set_session(sess):
    """Set the requests.Session to use for all API requests.

    :param sess: a ``requests.Session`` (or compatible) instance.
    """
    global session
    session = sess
def set_cache_time(time):
    """Set the maximum lifetime, in seconds, for a cached JSON response.

    :param time: maximum cache-file age in seconds.
    """
    # NOTE(review): the parameter name shadows the stdlib ``time`` module;
    # harmless here, but worth renaming if this function ever grows.
    global cache_time
    cache_time = time
def get_mumble_link():
    """Return the shared Mumble Link object from the mumble submodule.

    The import is deferred to call time -- presumably to avoid the
    platform-specific setup in ``.mumble`` at package import; confirm.
    """
    from .mumble import gw2link
    return gw2link
from .map import *
from .misc import *
from .items import *
from .skins import *
from .events import *
from .guild import *
from .wvw import *
from .util import *
|
hackedd/gw2api
|
gw2api/v2/endpoint.py
|
EndpointBase.get_cached
|
python
|
def get_cached(self, path, cache_name, **kwargs):
    """Request a resource from the API, first checking if there is a
    cached response available.  Returns the parsed JSON data wrapped
    via ``make_response``.
    """
    cache_file = None
    if gw2api.cache_dir and gw2api.cache_time and cache_name:
        cache_file = os.path.join(gw2api.cache_dir, cache_name)
        # Serve from cache while the file is younger than cache_time.
        if mtime(cache_file) >= time.time() - gw2api.cache_time:
            with open(cache_file, "r") as fp:
                cached = json.load(fp)
            return self.make_response(cached["data"], cached["meta"])
    meta, data = self._get(path, **kwargs)
    if cache_file:
        # Cache is configured but stale/missing -- write it back.
        with open(cache_file, "w") as fp:
            json.dump({"meta": meta, "data": data}, fp, indent=2)
    return self.make_response(data, meta)
|
Request a resource from the API, first checking if there is a cached
response available. Returns the parsed JSON data.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/v2/endpoint.py#L42-L61
|
[
"def _get(self, path, **kwargs):\n token = kwargs.pop(\"token\") if \"token\" in kwargs else self.token\n if token:\n headers = kwargs.setdefault(\"headers\", {})\n headers.setdefault(\"Authorization\", \"Bearer \" + token)\n return super(AuthenticatedMixin, self)._get(path, **kwargs)\n",
"def _get(self, path, **kwargs):\n r = gw2api.session.get(gw2api.v2.BASE_URL + path, **kwargs)\n\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n\n if not r.ok and isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n\n return self.get_metadata(r), response\n",
"def make_response(self, data, meta):\n response_type = self.response_types.get(type(data))\n return response_type(data, meta) if response_type else data\n"
] |
class EndpointBase(object):
    """Base class for v2 API endpoints: HTTP access, cache checks and
    response wrapping."""

    # Maps the raw JSON container type to the response wrapper class.
    response_types = {
        list: ListResponse,
        dict: DictResponse,
    }

    def __init__(self, name):
        super(EndpointBase, self).__init__()
        self.name = name

    def has_cached(self, cache_name):
        """Return True when a sufficiently fresh cache file exists."""
        if gw2api.cache_dir and gw2api.cache_time and cache_name:
            cache_file = os.path.join(gw2api.cache_dir, cache_name)
            return mtime(cache_file) >= time.time() - gw2api.cache_time
        else:
            return False

    def _get(self, path, **kwargs):
        """Perform the HTTP GET and return ``(metadata, parsed_json)``.

        On an error response the server-provided "text" message, when
        present, replaces the HTTP reason before raising.
        """
        r = gw2api.session.get(gw2api.v2.BASE_URL + path, **kwargs)
        try:
            response = r.json()
        except ValueError:  # pragma: no cover
            response = None
        if not r.ok and isinstance(response, dict) and "text" in response:
            r.reason = response["text"]
        r.raise_for_status()
        return self.get_metadata(r), response

    def get_metadata(self, r):
        """Extract pagination links and page/result counters from headers."""
        metadata = {}
        for key, link in r.links.items():
            metadata[key] = link["url"]
        if "x-page-total" in r.headers:
            metadata["page_total"] = int(r.headers["x-page-total"])
        if "x-page-size" in r.headers:
            metadata["page_size"] = int(r.headers["x-page-size"])
        if "x-result-total" in r.headers:
            metadata["result_total"] = int(r.headers["x-result-total"])
        if "x-result-count" in r.headers:
            metadata["result_count"] = int(r.headers["x-result-count"])
        return metadata

    def make_response(self, data, meta):
        """Wrap raw JSON data in a list/dict response type when known."""
        response_type = self.response_types.get(type(data))
        return response_type(data, meta) if response_type else data
|
hackedd/gw2api
|
gw2api/skins.py
|
skin_details
|
python
|
def skin_details(skin_id, lang="en"):
    """Return details about a single skin.

    :param skin_id: The skin to query for.
    :param lang: The language to display the texts in.
    """
    params = {"skin_id": skin_id, "lang": lang}
    # Per-skin, per-language cache file name.
    cache_name = "skin_details.{skin_id}.{lang}.json".format(**params)
    return get_cached("skin_details.json", cache_name, params=params)
|
This resource returns details about a single skin.
:param skin_id: The skin to query for.
:param lang: The language to display the texts in.
The response is an object with at least the following properties. Note that
the availability of some properties depends on the type of item the skin
applies to.
skin_id (number):
The skin id.
name (string):
The name of the skin.
type (string):
The type of item the skin applies to. One of ``Armor``, ``Back`` or
``Weapon``.
flags (list):
Skin flags. Currently known skin flags are ``ShowInWardrobe``,
``HideIfLocked`` and ``NoCost``.
restrictions (list):
Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and
``Sylvari``.
icon_file_id (string):
The icon file id to be used with the render service.
icon_file_signature (string):
The icon file signature to be used with the render service.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/skins.py#L16-L53
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("skins", "skin_details")
def skins():
    """This resource returns a list of skins that were discovered by players
    in the game. Details about a single skin can be obtained using the
    :func:`skin_details` resource.
    """
    # The response wraps the list in a {"skins": [...]} container.
    return get_cached("skins.json").get("skins")
|
hackedd/gw2api
|
gw2api/mumble.py
|
GuildWars2FileMapping.get_map_location
|
python
|
def get_map_location(self):
    """Get the location of the player, converted to world coordinates.

    :return: a tuple (x, y, z).
    """
    data = self.get_map()
    (bounds_e, bounds_n), (bounds_w, bounds_s) = data["continent_rect"]
    (map_e, map_n), (map_w, map_s) = data["map_rect"]
    # Sanity-check that both rectangles are well-formed.
    assert bounds_w < bounds_e
    assert bounds_n < bounds_s
    assert map_w < map_e
    assert map_n < map_s
    m_to_in = 39.3701
    x, y, z = self.fAvatarPosition
    # Linear interpolation of the avatar position from map-rect space
    # into continent-rect space (game positions are in meters).
    map_x = bounds_w + ((x * m_to_in - map_w) /
                        (map_e - map_w) * (bounds_e - bounds_w))
    map_y = bounds_n + ((-z * m_to_in - map_n) /
                        (map_s - map_n) * (bounds_s - bounds_n))
    map_z = y * m_to_in
    return map_x, map_y, map_z
|
Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z).
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/mumble.py#L93-L117
|
[
"def get_map(self, lang=\"en\"):\n return maps(self.context.mapId, lang)\n"
] |
class GuildWars2FileMapping(FileMapping):
    """File mapping for the Guild Wars 2 "MumbleLink" shared data block."""

    def __init__(self, name=u"MumbleLink", value_struct=LinkedMem,
                 create=True):
        super(GuildWars2FileMapping, self).__init__(name, value_struct, create)

    def get_map(self, lang="en"):
        """Look up map details for ``self.context.mapId``."""
        return maps(self.context.mapId, lang)
|
hackedd/gw2api
|
gw2api/wvw.py
|
matches
|
python
|
def matches():
    """Return the currently running WvW matches.

    Each match dict carries ``wvw_match_id``, the participating
    world ids, and ``start_time``/``end_time`` parsed into datetimes.
    """
    result = get_cached("wvw/matches.json", False).get("wvw_matches")
    for entry in result:
        # Convert the API's timestamp strings in place.
        entry["start_time"] = parse_datetime(entry["start_time"])
        entry["end_time"] = parse_datetime(entry["end_time"])
    return result
|
This resource returns a list of the currently running WvW matches, with
the participating worlds included in the result. Further details about a
match can be requested using the ``match_details`` function.
The response is a list of match objects, each of which contains the
following properties:
wvw_match_id (string):
The WvW match id.
red_world_id (number):
The world id of the red world.
blue_world_id (number):
The world id of the blue world.
green_world_id (number):
The world id of the green world.
start_time (datetime):
A timestamp of when the match started.
end_time (datetime):
A timestamp of when the match ends.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/wvw.py#L20-L51
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n",
"def parse_datetime(date_string):\n \"\"\"Parse a datetime string as returned by the ``matches`` endpoint to a\n datetime object.\n\n >>> parse_datetime('2014-07-04T18:00:00Z')\n datetime.datetime(2014, 7, 4, 18, 0)\n\n \"\"\"\n return datetime.strptime(date_string, \"%Y-%m-%dT%H:%M:%SZ\")\n"
] |
from datetime import datetime
from .util import get_cached
__all__ = ("matches", "match_details", "objective_names")
def parse_datetime(date_string):
    """Parse a timestamp string as returned by the ``matches`` endpoint.

    >>> parse_datetime('2014-07-04T18:00:00Z')
    datetime.datetime(2014, 7, 4, 18, 0)
    """
    fmt = "%Y-%m-%dT%H:%M:%SZ"
    return datetime.strptime(date_string, fmt)
def match_details(match_id):
    """Return further details about the specified WvW match, including
    the total score and further details for each map.

    :param match_id: The WvW match to query for.
    :returns: an object with ``match_id``, total ``scores`` (order:
        red, blue, green) and a ``maps`` list.  Each map entry carries
        its ``type`` (RedHome, GreenHome, BlueHome or Center for
        Eternal Battlegrounds), its own ``scores``, its ``objectives``
        (``id``, ``owner`` -- Red/Green/Blue/Neutral -- and, when
        claimed, ``owner_guild``) and any ``bonuses`` currently
        granted on that map (``type``, e.g. ``bloodlust``, and
        ``owner``; neutral-owned bonuses are not listed).
    """
    # Never cached: scores change continuously during a match.
    params = {"match_id": match_id}
    return get_cached("wvw/match_details.json", False, params=params)
def objective_names(lang="en"):
    """Return the localized WvW objective names for the given language.

    :param lang: The language to query the names for.
    :return: A dictionary mapping the objective ids to the names.

    *Note that these are not the names displayed in the game, but rather
    the abstract type.*
    """
    params = {"lang": lang}
    cache_name = "objective_names.%(lang)s.json" % params
    data = get_cached("wvw/objective_names.json", cache_name, params=params)
    return {objective["id"]: objective["name"] for objective in data}
|
hackedd/gw2api
|
gw2api/wvw.py
|
objective_names
|
python
|
def objective_names(lang="en"):
    """Return the localized WvW objective names for the given language.

    :param lang: The language to query the names for.
    :return: A dictionary mapping the objective ids to the names.
    """
    params = {"lang": lang}
    cache_name = "objective_names.%(lang)s.json" % params
    data = get_cached("wvw/objective_names.json", cache_name, params=params)
    return {objective["id"]: objective["name"] for objective in data}
|
This resource returns a list of the localized WvW objective names for
the specified language.
:param lang: The language to query the names for.
:return: A dictionary mapping the objective Ids to the names.
*Note that these are not the names displayed in the game, but rather the
abstract type.*
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/wvw.py#L117-L131
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from datetime import datetime
from .util import get_cached
__all__ = ("matches", "match_details", "objective_names")
def parse_datetime(date_string):
    """Parse a ``matches``-endpoint timestamp into a datetime.

    >>> parse_datetime('2014-07-04T18:00:00Z')
    datetime.datetime(2014, 7, 4, 18, 0)
    """
    fmt = "%Y-%m-%dT%H:%M:%SZ"
    return datetime.strptime(date_string, fmt)
def matches():
    """Return the currently running WvW matches, with the participating
    worlds included in the result.

    Each match dict carries ``wvw_match_id``, ``red_world_id``,
    ``blue_world_id``, ``green_world_id`` and ``start_time`` /
    ``end_time`` parsed into datetimes.  Further details about a match
    can be requested using the ``match_details`` function.
    """
    result = get_cached("wvw/matches.json", False).get("wvw_matches")
    for entry in result:
        # Convert the API's timestamp strings in place.
        entry["start_time"] = parse_datetime(entry["start_time"])
        entry["end_time"] = parse_datetime(entry["end_time"])
    return result
def match_details(match_id):
    """Return further details about the specified WvW match, including
    the total score and further details for each map.

    :param match_id: The WvW match to query for.
    :returns: an object with ``match_id``, total ``scores`` (order:
        red, blue, green) and a ``maps`` list.  Each map entry carries
        its ``type`` (RedHome, GreenHome, BlueHome or Center for
        Eternal Battlegrounds), its own ``scores``, its ``objectives``
        (``id``, ``owner`` -- Red/Green/Blue/Neutral -- and, when
        claimed, ``owner_guild``) and any ``bonuses`` currently
        granted on that map (``type``, e.g. ``bloodlust``, and
        ``owner``; neutral-owned bonuses are not listed).
    """
    # Never cached: scores change continuously during a match.
    params = {"match_id": match_id}
    return get_cached("wvw/match_details.json", False, params=params)
|
hackedd/gw2api
|
gw2api/util.py
|
mtime
|
python
|
def mtime(path):
    """Get the modification time of a file, or -1 if the file does not exist.

    Uses EAFP (stat and handle the error) instead of an ``exists()``
    pre-check, which removes the race between the check and the stat
    call; ``os.path.exists`` also returns False on OS errors, so the
    fallback behavior is unchanged.
    """
    try:
        return os.stat(path).st_mtime
    except OSError:
        return -1
|
Get the modification time of a file, or -1 if the file does not exist.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/util.py#L14-L20
| null |
import os
import time
import json
from struct import pack, unpack
from base64 import b64encode, b64decode
import gw2api
__all__ = ("encode_item_link", "encode_coin_link",
"encode_chat_link", "decode_chat_link")
def get_cached(path, cache_name=None, **kwargs):
    """Request a resource from the API, first checking if there is a cached
    response available. Returns the parsed JSON data.

    Pass ``cache_name=False`` to bypass the cache entirely; ``None``
    derives the cache file name from ``path``.
    """
    cache_file = None
    if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:
        if cache_name is None:
            cache_name = path
        cache_file = os.path.join(gw2api.cache_dir, cache_name)
        # Serve from cache while the file is younger than cache_time.
        if mtime(cache_file) >= time.time() - gw2api.cache_time:
            with open(cache_file, "r") as fp:
                return json.load(fp)
    http_resp = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)
    if not http_resp.ok:
        # Surface the server-provided error message as the HTTP reason.
        try:
            body = http_resp.json()
        except ValueError:  # pragma: no cover
            body = None
        if isinstance(body, dict) and "text" in body:
            http_resp.reason = body["text"]
    http_resp.raise_for_status()
    data = http_resp.json()
    if cache_file:
        with open(cache_file, "w") as fp:
            json.dump(data, fp, indent=2)
    return data
def encode_item_link(item_id, number=1, skin_id=None,
                     upgrade1=None, upgrade2=None):
    """Encode a chat link for an item (or a stack of items).

    :param item_id: the Id of the item
    :param number: the number of items in the stack
    :param skin_id: the id of the skin applied to the item
    :param upgrade1: the id of the first upgrade component
    :param upgrade2: the id of the second upgrade component
    """
    # Thin wrapper that forwards to the generic encoder.
    return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number,
                            skin_id=skin_id, upgrade1=upgrade1,
                            upgrade2=upgrade2)
def encode_coin_link(copper, silver=0, gold=0):
    """Encode a chat link for an amount of coins.

    :param copper: number of copper coins
    :param silver: number of silver coins
    :param gold: number of gold coins
    """
    return encode_chat_link(gw2api.TYPE_COIN, copper=copper, silver=silver,
                            gold=gold)
def encode_chat_link(link_type, **kwargs):
    """Encode an in-game chat link of the given type.

    :param link_type: either a numeric ``gw2api.TYPE_*`` constant or one
        of the string keys of ``gw2api.LINK_TYPES``.
    :param kwargs: type-specific fields: ``amount`` or
        ``copper``/``silver``/``gold`` for coins; ``id``, ``number``,
        ``skin_id``, ``upgrade1``, ``upgrade2`` for items; ``id`` for
        most other types; ``objective_id`` and ``map_id`` for WvW
        objectives.
    :returns: the chat code as a string, e.g. ``"[&...]"``.
    :raises Exception: for unknown link types.
    """
    if link_type in gw2api.LINK_TYPES:
        link_type = gw2api.LINK_TYPES[link_type]

    if link_type == gw2api.TYPE_COIN:
        if "copper" in kwargs or "silver" in kwargs or "gold" in kwargs:
            amount = (kwargs.get("gold", 0) * 100 * 100 +
                      kwargs.get("silver", 0) * 100 +
                      kwargs.get("copper", 0))
        else:
            amount = kwargs["amount"]
        data = pack("<BI", link_type, amount)

    elif link_type == gw2api.TYPE_ITEM:
        item_id = kwargs["id"]

        args = []
        for i, key in enumerate(("skin_id", "upgrade1", "upgrade2")):
            value = kwargs.get(key)
            if value:
                # Flag bits live in the top byte of the id dword:
                # 0x80 = skin, 0x40 = upgrade1, 0x20 = upgrade2.
                # (Was ``2 << (28 + i)``, which set 0x20 for the skin and
                # 0x80 for upgrade2 -- the reverse of what
                # decode_chat_link reads, so links did not round-trip.)
                item_id |= (0x80 >> i) << 24
                args.append(value)

        format = "<BBI" + "I" * len(args)
        data = pack(format, link_type, kwargs.get("number", 1), item_id, *args)

    elif link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
                       gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
                       gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
        data = pack("<BI", link_type, kwargs["id"])

    elif link_type == gw2api.TYPE_OBJECTIVE:
        data = pack("<BII", link_type,
                    kwargs["objective_id"], kwargs["map_id"])

    elif isinstance(link_type, int):
        raise Exception("Unknown link type 0x%02x" % link_type)

    else:
        raise Exception("Unknown link type '%s'" % link_type)

    return "[&%s]" % b64encode(data).decode("ascii")
def decode_chat_link(string):
    """Decode a chat code into ``(type_name, values_dict)``.

    Accepts the code with or without the surrounding ``[&...]`` wrapper.

    :raises Exception: for unknown link types.
    """
    if string.startswith("[&") and string.endswith("]"):
        string = string[2:-1]
    data = b64decode(string.encode("ascii"))
    # First byte selects the link type.
    link_type, = unpack("<B", data[:1])
    if link_type == gw2api.TYPE_COIN:
        amount, = unpack("<I", data[1:])
        return "coin", {"amount": amount}
    if link_type == gw2api.TYPE_ITEM:
        number, item_id = unpack("<BI", data[1:6])
        # Top byte of the id dword holds presence flags:
        # 0x80 = skin, 0x40 = upgrade1, 0x20 = upgrade2.
        flags = (item_id & 0xFF000000) >> 24
        item_id &= 0x00FFFFFF
        values = {"number": number, "id": item_id}
        o = 6
        if flags & 0x80:
            values["skin_id"], = unpack("<I", data[o:o+4])
            o += 4
        if flags & 0x40:
            values["upgrade1"], = unpack("<I", data[o:o+4])
            o += 4
        if flags & 0x20:
            values["upgrade2"], = unpack("<I", data[o:o+4])
            o += 4
        return "item", values
    # Reverse-map the numeric type to its string name.
    link_type_string = None
    for key, value in gw2api.LINK_TYPES.items():
        if value == link_type:
            link_type_string = key
    if link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
                     gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
                     gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
        id, = unpack("<I", data[1:])
        return link_type_string, {"id": id}
    if link_type == gw2api.TYPE_OBJECTIVE:
        objective_id, map_id = unpack("<II", data[1:])
        return link_type_string, {"objective_id": objective_id,
                                  "map_id": map_id}
    raise Exception("Unknown link type 0x%02x" % link_type)
|
hackedd/gw2api
|
gw2api/util.py
|
get_cached
|
python
|
def get_cached(path, cache_name=None, **kwargs):
    """Request a resource from the API, first checking if there is a cached
    response available. Returns the parsed JSON data.

    Pass ``cache_name=False`` to bypass the cache entirely; ``None``
    derives the cache file name from ``path``.
    """
    cache_file = None
    if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:
        if cache_name is None:
            cache_name = path
        cache_file = os.path.join(gw2api.cache_dir, cache_name)
        # Serve from cache while the file is younger than cache_time.
        if mtime(cache_file) >= time.time() - gw2api.cache_time:
            with open(cache_file, "r") as fp:
                return json.load(fp)
    http_resp = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)
    if not http_resp.ok:
        # Surface the server-provided error message as the HTTP reason.
        try:
            body = http_resp.json()
        except ValueError:  # pragma: no cover
            body = None
        if isinstance(body, dict) and "text" in body:
            http_resp.reason = body["text"]
    http_resp.raise_for_status()
    data = http_resp.json()
    if cache_file:
        with open(cache_file, "w") as fp:
            json.dump(data, fp, indent=2)
    return data
|
Request a resource from the API, first checking if there is a cached
response available. Returns the parsed JSON data.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/util.py#L23-L54
| null |
import os
import time
import json
from struct import pack, unpack
from base64 import b64encode, b64decode
import gw2api
__all__ = ("encode_item_link", "encode_coin_link",
"encode_chat_link", "decode_chat_link")
def mtime(path):
    """Get the modification time of a file, or -1 if the file does not exist.

    Uses EAFP (stat and handle the error) instead of an ``exists()``
    pre-check, which removes the race between the check and the stat
    call; ``os.path.exists`` also returns False on OS errors, so the
    fallback behavior is unchanged.
    """
    try:
        return os.stat(path).st_mtime
    except OSError:
        return -1
def encode_item_link(item_id, number=1, skin_id=None,
                     upgrade1=None, upgrade2=None):
    """Encode a chat link for an item (or a stack of items).

    :param item_id: the Id of the item
    :param number: the number of items in the stack
    :param skin_id: the id of the skin applied to the item
    :param upgrade1: the id of the first upgrade component
    :param upgrade2: the id of the second upgrade component
    """
    # Thin wrapper that forwards to the generic encoder.
    return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number,
                            skin_id=skin_id, upgrade1=upgrade1,
                            upgrade2=upgrade2)
def encode_coin_link(copper, silver=0, gold=0):
    """Encode a chat link for an amount of coins.

    :param copper: number of copper coins
    :param silver: number of silver coins
    :param gold: number of gold coins
    """
    return encode_chat_link(gw2api.TYPE_COIN, copper=copper, silver=silver,
                            gold=gold)
def encode_chat_link(link_type, **kwargs):
    """Encode an in-game chat link of the given type.

    :param link_type: either a numeric ``gw2api.TYPE_*`` constant or one
        of the string keys of ``gw2api.LINK_TYPES``.
    :param kwargs: type-specific fields: ``amount`` or
        ``copper``/``silver``/``gold`` for coins; ``id``, ``number``,
        ``skin_id``, ``upgrade1``, ``upgrade2`` for items; ``id`` for
        most other types; ``objective_id`` and ``map_id`` for WvW
        objectives.
    :returns: the chat code as a string, e.g. ``"[&...]"``.
    :raises Exception: for unknown link types.
    """
    if link_type in gw2api.LINK_TYPES:
        link_type = gw2api.LINK_TYPES[link_type]

    if link_type == gw2api.TYPE_COIN:
        if "copper" in kwargs or "silver" in kwargs or "gold" in kwargs:
            amount = (kwargs.get("gold", 0) * 100 * 100 +
                      kwargs.get("silver", 0) * 100 +
                      kwargs.get("copper", 0))
        else:
            amount = kwargs["amount"]
        data = pack("<BI", link_type, amount)

    elif link_type == gw2api.TYPE_ITEM:
        item_id = kwargs["id"]

        args = []
        for i, key in enumerate(("skin_id", "upgrade1", "upgrade2")):
            value = kwargs.get(key)
            if value:
                # Flag bits live in the top byte of the id dword:
                # 0x80 = skin, 0x40 = upgrade1, 0x20 = upgrade2.
                # (Was ``2 << (28 + i)``, which set 0x20 for the skin and
                # 0x80 for upgrade2 -- the reverse of what
                # decode_chat_link reads, so links did not round-trip.)
                item_id |= (0x80 >> i) << 24
                args.append(value)

        format = "<BBI" + "I" * len(args)
        data = pack(format, link_type, kwargs.get("number", 1), item_id, *args)

    elif link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
                       gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
                       gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
        data = pack("<BI", link_type, kwargs["id"])

    elif link_type == gw2api.TYPE_OBJECTIVE:
        data = pack("<BII", link_type,
                    kwargs["objective_id"], kwargs["map_id"])

    elif isinstance(link_type, int):
        raise Exception("Unknown link type 0x%02x" % link_type)

    else:
        raise Exception("Unknown link type '%s'" % link_type)

    return "[&%s]" % b64encode(data).decode("ascii")
def decode_chat_link(string):
    """Decode a chat link into a ``(type_name, values)`` tuple.

    :param string: the chat link, with or without the surrounding
                   ``[&`` ... ``]`` markers.
    :return: a tuple of the link type name and a dict of decoded values.
    :raises Exception: if the link type is not recognized.
    """
    # Strip the "[&...]" wrapper if present; the payload is base64.
    if string.startswith("[&") and string.endswith("]"):
        string = string[2:-1]
    data = b64decode(string.encode("ascii"))
    # The first byte of the payload identifies the link type.
    link_type, = unpack("<B", data[:1])
    if link_type == gw2api.TYPE_COIN:
        amount, = unpack("<I", data[1:])
        return "coin", {"amount": amount}
    if link_type == gw2api.TYPE_ITEM:
        number, item_id = unpack("<BI", data[1:6])
        # The high byte of the 32-bit id field carries presence flags for
        # the optional trailing fields; the low 24 bits are the item id.
        flags = (item_id & 0xFF000000) >> 24
        item_id &= 0x00FFFFFF
        values = {"number": number, "id": item_id}
        o = 6  # byte offset of the first optional 32-bit value
        if flags & 0x80:
            values["skin_id"], = unpack("<I", data[o:o+4])
            o += 4
        if flags & 0x40:
            values["upgrade1"], = unpack("<I", data[o:o+4])
            o += 4
        if flags & 0x20:
            values["upgrade2"], = unpack("<I", data[o:o+4])
            o += 4
        return "item", values
    # Map the numeric type back to its symbolic name for the return value.
    link_type_string = None
    for key, value in gw2api.LINK_TYPES.items():
        if value == link_type:
            link_type_string = key
    if link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
                     gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
                     gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
        # These types all carry a single 32-bit id after the type byte.
        id, = unpack("<I", data[1:])
        return link_type_string, {"id": id}
    if link_type == gw2api.TYPE_OBJECTIVE:
        objective_id, map_id = unpack("<II", data[1:])
        return link_type_string, {"objective_id": objective_id,
                                  "map_id": map_id}
    raise Exception("Unknown link type 0x%02x" % link_type)
|
hackedd/gw2api
|
gw2api/util.py
|
encode_item_link
|
python
|
def encode_item_link(item_id, number=1, skin_id=None,
upgrade1=None, upgrade2=None):
return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number,
skin_id=skin_id, upgrade1=upgrade1,
upgrade2=upgrade2)
|
Encode a chat link for an item (or a stack of items).
:param item_id: the Id of the item
:param number: the number of items in the stack
:param skin_id: the id of the skin applied to the item
:param upgrade1: the id of the first upgrade component
:param upgrade2: the id of the second upgrade component
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/util.py#L57-L69
|
[
"def encode_chat_link(link_type, **kwargs):\n if link_type in gw2api.LINK_TYPES:\n link_type = gw2api.LINK_TYPES[link_type]\n\n if link_type == gw2api.TYPE_COIN:\n if \"copper\" in kwargs or \"silver\" in kwargs or \"gold\" in kwargs:\n amount = (kwargs.get(\"gold\", 0) * 100 * 100 +\n kwargs.get(\"silver\", 0) * 100 +\n kwargs.get(\"copper\", 0))\n else:\n amount = kwargs[\"amount\"]\n data = pack(\"<BI\", link_type, amount)\n\n elif link_type == gw2api.TYPE_ITEM:\n item_id = kwargs[\"id\"]\n\n args = []\n for i, key in enumerate((\"skin_id\", \"upgrade1\", \"upgrade2\")):\n value = kwargs.get(key)\n if value:\n item_id |= 2 << (28 + i)\n args.append(value)\n\n format = \"<BBI\" + \"I\" * len(args)\n data = pack(format, link_type, kwargs.get(\"number\", 1), item_id, *args)\n\n elif link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,\n gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,\n gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):\n data = pack(\"<BI\", link_type, kwargs[\"id\"])\n\n elif link_type == gw2api.TYPE_OBJECTIVE:\n data = pack(\"<BII\", link_type,\n kwargs[\"objective_id\"], kwargs[\"map_id\"])\n\n elif isinstance(link_type, int):\n raise Exception(\"Unknown link type 0x%02x\" % link_type)\n\n else:\n raise Exception(\"Unknown link type '%s'\" % link_type)\n\n return \"[&%s]\" % b64encode(data).decode(\"ascii\")\n"
] |
import os
import time
import json
from struct import pack, unpack
from base64 import b64encode, b64decode
import gw2api
__all__ = ("encode_item_link", "encode_coin_link",
"encode_chat_link", "decode_chat_link")
def mtime(path):
    """Return the modification time of *path*, or -1 if the path does
    not exist.
    """
    if os.path.exists(path):
        return os.stat(path).st_mtime
    return -1
def get_cached(path, cache_name=None, **kwargs):
    """Request a resource from the API, first checking if there is a cached
    response available. Returns the parsed JSON data.

    :param path: the API resource path, appended to ``gw2api.BASE_URL``.
    :param cache_name: filename to cache under; ``None`` uses *path*,
                       ``False`` disables caching for this request.
    :param kwargs: passed through to ``session.get`` (e.g. ``params=``).
    :raises requests.HTTPError: if the API returns an error status.
    """
    # Caching is only active when both cache_dir and cache_time are
    # configured and the caller has not opted out with cache_name=False.
    if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:
        if cache_name is None:
            cache_name = path
        cache_file = os.path.join(gw2api.cache_dir, cache_name)
        # Serve from cache only while the file is younger than cache_time.
        if mtime(cache_file) >= time.time() - gw2api.cache_time:
            with open(cache_file, "r") as fp:
                return json.load(fp)
    else:
        cache_file = None

    r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)

    if not r.ok:
        # The API often returns a JSON body with a human-readable "text"
        # field on errors; surface it as the HTTP reason before raising.
        try:
            response = r.json()
        except ValueError:  # pragma: no cover
            response = None
        if isinstance(response, dict) and "text" in response:
            r.reason = response["text"]

    r.raise_for_status()
    data = r.json()

    # Write (or refresh) the cache file with the freshly fetched data.
    if cache_file:
        with open(cache_file, "w") as fp:
            json.dump(data, fp, indent=2)

    return data
def encode_coin_link(copper, silver=0, gold=0):
"""Encode a chat link for an amount of coins.
"""
return encode_chat_link(gw2api.TYPE_COIN, copper=copper, silver=silver,
gold=gold)
def encode_chat_link(link_type, **kwargs):
    """Encode a chat link of the given type.

    :param link_type: one of the ``gw2api.TYPE_*`` constants, or a
                      symbolic name from ``gw2api.LINK_TYPES``.
    :param kwargs: type-specific values. Coin links take ``amount`` (in
                   copper) or any of ``copper``/``silver``/``gold``.
                   Item links take ``id`` and optionally ``number``,
                   ``skin_id``, ``upgrade1`` and ``upgrade2``. Objective
                   links take ``objective_id`` and ``map_id``; the
                   remaining types take ``id``.
    :return: the chat link as a string, e.g. ``"[&...]"``.
    :raises Exception: if the link type is not recognized.
    """
    # Accept symbolic names ("item", "coin", ...) as well as raw type ids.
    if link_type in gw2api.LINK_TYPES:
        link_type = gw2api.LINK_TYPES[link_type]

    if link_type == gw2api.TYPE_COIN:
        if "copper" in kwargs or "silver" in kwargs or "gold" in kwargs:
            amount = (kwargs.get("gold", 0) * 100 * 100 +
                      kwargs.get("silver", 0) * 100 +
                      kwargs.get("copper", 0))
        else:
            amount = kwargs["amount"]
        data = pack("<BI", link_type, amount)

    elif link_type == gw2api.TYPE_ITEM:
        item_id = kwargs["id"]

        args = []
        for i, key in enumerate(("skin_id", "upgrade1", "upgrade2")):
            value = kwargs.get(key)
            if value:
                # BUGFIX: mirror decode_chat_link, which reads skin_id
                # from flag 0x80, upgrade1 from 0x40 and upgrade2 from
                # 0x20 of the id's high byte. The previous expression
                # (2 << (28 + i)) set 0x20/0x40/0x80 instead, so encoded
                # item links did not round-trip through decode_chat_link.
                item_id |= (0x80 >> i) << 24
                args.append(value)

        # 'fmt' instead of 'format' to avoid shadowing the builtin.
        fmt = "<BBI" + "I" * len(args)
        data = pack(fmt, link_type, kwargs.get("number", 1), item_id, *args)

    elif link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
                       gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
                       gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
        data = pack("<BI", link_type, kwargs["id"])

    elif link_type == gw2api.TYPE_OBJECTIVE:
        data = pack("<BII", link_type,
                    kwargs["objective_id"], kwargs["map_id"])

    elif isinstance(link_type, int):
        raise Exception("Unknown link type 0x%02x" % link_type)

    else:
        raise Exception("Unknown link type '%s'" % link_type)

    return "[&%s]" % b64encode(data).decode("ascii")
def decode_chat_link(string):
if string.startswith("[&") and string.endswith("]"):
string = string[2:-1]
data = b64decode(string.encode("ascii"))
link_type, = unpack("<B", data[:1])
if link_type == gw2api.TYPE_COIN:
amount, = unpack("<I", data[1:])
return "coin", {"amount": amount}
if link_type == gw2api.TYPE_ITEM:
number, item_id = unpack("<BI", data[1:6])
flags = (item_id & 0xFF000000) >> 24
item_id &= 0x00FFFFFF
values = {"number": number, "id": item_id}
o = 6
if flags & 0x80:
values["skin_id"], = unpack("<I", data[o:o+4])
o += 4
if flags & 0x40:
values["upgrade1"], = unpack("<I", data[o:o+4])
o += 4
if flags & 0x20:
values["upgrade2"], = unpack("<I", data[o:o+4])
o += 4
return "item", values
link_type_string = None
for key, value in gw2api.LINK_TYPES.items():
if value == link_type:
link_type_string = key
if link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
id, = unpack("<I", data[1:])
return link_type_string, {"id": id}
if link_type == gw2api.TYPE_OBJECTIVE:
objective_id, map_id = unpack("<II", data[1:])
return link_type_string, {"objective_id": objective_id,
"map_id": map_id}
raise Exception("Unknown link type 0x%02x" % link_type)
|
hackedd/gw2api
|
gw2api/util.py
|
encode_coin_link
|
python
|
def encode_coin_link(copper, silver=0, gold=0):
return encode_chat_link(gw2api.TYPE_COIN, copper=copper, silver=silver,
gold=gold)
|
Encode a chat link for an amount of coins.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/util.py#L72-L76
|
[
"def encode_chat_link(link_type, **kwargs):\n if link_type in gw2api.LINK_TYPES:\n link_type = gw2api.LINK_TYPES[link_type]\n\n if link_type == gw2api.TYPE_COIN:\n if \"copper\" in kwargs or \"silver\" in kwargs or \"gold\" in kwargs:\n amount = (kwargs.get(\"gold\", 0) * 100 * 100 +\n kwargs.get(\"silver\", 0) * 100 +\n kwargs.get(\"copper\", 0))\n else:\n amount = kwargs[\"amount\"]\n data = pack(\"<BI\", link_type, amount)\n\n elif link_type == gw2api.TYPE_ITEM:\n item_id = kwargs[\"id\"]\n\n args = []\n for i, key in enumerate((\"skin_id\", \"upgrade1\", \"upgrade2\")):\n value = kwargs.get(key)\n if value:\n item_id |= 2 << (28 + i)\n args.append(value)\n\n format = \"<BBI\" + \"I\" * len(args)\n data = pack(format, link_type, kwargs.get(\"number\", 1), item_id, *args)\n\n elif link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,\n gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,\n gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):\n data = pack(\"<BI\", link_type, kwargs[\"id\"])\n\n elif link_type == gw2api.TYPE_OBJECTIVE:\n data = pack(\"<BII\", link_type,\n kwargs[\"objective_id\"], kwargs[\"map_id\"])\n\n elif isinstance(link_type, int):\n raise Exception(\"Unknown link type 0x%02x\" % link_type)\n\n else:\n raise Exception(\"Unknown link type '%s'\" % link_type)\n\n return \"[&%s]\" % b64encode(data).decode(\"ascii\")\n"
] |
import os
import time
import json
from struct import pack, unpack
from base64 import b64encode, b64decode
import gw2api
__all__ = ("encode_item_link", "encode_coin_link",
"encode_chat_link", "decode_chat_link")
def mtime(path):
"""Get the modification time of a file, or -1 if the file does not exist.
"""
if not os.path.exists(path):
return -1
stat = os.stat(path)
return stat.st_mtime
def get_cached(path, cache_name=None, **kwargs):
"""Request a resource form the API, first checking if there is a cached
response available. Returns the parsed JSON data.
"""
if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:
if cache_name is None:
cache_name = path
cache_file = os.path.join(gw2api.cache_dir, cache_name)
if mtime(cache_file) >= time.time() - gw2api.cache_time:
with open(cache_file, "r") as fp:
return json.load(fp)
else:
cache_file = None
r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)
if not r.ok:
try:
response = r.json()
except ValueError: # pragma: no cover
response = None
if isinstance(response, dict) and "text" in response:
r.reason = response["text"]
r.raise_for_status()
data = r.json()
if cache_file:
with open(cache_file, "w") as fp:
json.dump(data, fp, indent=2)
return data
def encode_item_link(item_id, number=1, skin_id=None,
upgrade1=None, upgrade2=None):
"""Encode a chat link for an item (or a stack of items).
:param item_id: the Id of the item
:param number: the number of items in the stack
:param skin_id: the id of the skin applied to the item
:param upgrade1: the id of the first upgrade component
:param upgrade2: the id of the second upgrade component
"""
return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number,
skin_id=skin_id, upgrade1=upgrade1,
upgrade2=upgrade2)
def encode_chat_link(link_type, **kwargs):
    """Encode a chat link of the given type.

    :param link_type: one of the ``gw2api.TYPE_*`` constants, or a
                      symbolic name from ``gw2api.LINK_TYPES``.
    :param kwargs: type-specific values. Coin links take ``amount`` (in
                   copper) or any of ``copper``/``silver``/``gold``.
                   Item links take ``id`` and optionally ``number``,
                   ``skin_id``, ``upgrade1`` and ``upgrade2``. Objective
                   links take ``objective_id`` and ``map_id``; the
                   remaining types take ``id``.
    :return: the chat link as a string, e.g. ``"[&...]"``.
    :raises Exception: if the link type is not recognized.
    """
    # Accept symbolic names ("item", "coin", ...) as well as raw type ids.
    if link_type in gw2api.LINK_TYPES:
        link_type = gw2api.LINK_TYPES[link_type]

    if link_type == gw2api.TYPE_COIN:
        if "copper" in kwargs or "silver" in kwargs or "gold" in kwargs:
            amount = (kwargs.get("gold", 0) * 100 * 100 +
                      kwargs.get("silver", 0) * 100 +
                      kwargs.get("copper", 0))
        else:
            amount = kwargs["amount"]
        data = pack("<BI", link_type, amount)

    elif link_type == gw2api.TYPE_ITEM:
        item_id = kwargs["id"]

        args = []
        for i, key in enumerate(("skin_id", "upgrade1", "upgrade2")):
            value = kwargs.get(key)
            if value:
                # BUGFIX: mirror decode_chat_link, which reads skin_id
                # from flag 0x80, upgrade1 from 0x40 and upgrade2 from
                # 0x20 of the id's high byte. The previous expression
                # (2 << (28 + i)) set 0x20/0x40/0x80 instead, so encoded
                # item links did not round-trip through decode_chat_link.
                item_id |= (0x80 >> i) << 24
                args.append(value)

        # 'fmt' instead of 'format' to avoid shadowing the builtin.
        fmt = "<BBI" + "I" * len(args)
        data = pack(fmt, link_type, kwargs.get("number", 1), item_id, *args)

    elif link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
                       gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
                       gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
        data = pack("<BI", link_type, kwargs["id"])

    elif link_type == gw2api.TYPE_OBJECTIVE:
        data = pack("<BII", link_type,
                    kwargs["objective_id"], kwargs["map_id"])

    elif isinstance(link_type, int):
        raise Exception("Unknown link type 0x%02x" % link_type)

    else:
        raise Exception("Unknown link type '%s'" % link_type)

    return "[&%s]" % b64encode(data).decode("ascii")
def decode_chat_link(string):
if string.startswith("[&") and string.endswith("]"):
string = string[2:-1]
data = b64decode(string.encode("ascii"))
link_type, = unpack("<B", data[:1])
if link_type == gw2api.TYPE_COIN:
amount, = unpack("<I", data[1:])
return "coin", {"amount": amount}
if link_type == gw2api.TYPE_ITEM:
number, item_id = unpack("<BI", data[1:6])
flags = (item_id & 0xFF000000) >> 24
item_id &= 0x00FFFFFF
values = {"number": number, "id": item_id}
o = 6
if flags & 0x80:
values["skin_id"], = unpack("<I", data[o:o+4])
o += 4
if flags & 0x40:
values["upgrade1"], = unpack("<I", data[o:o+4])
o += 4
if flags & 0x20:
values["upgrade2"], = unpack("<I", data[o:o+4])
o += 4
return "item", values
link_type_string = None
for key, value in gw2api.LINK_TYPES.items():
if value == link_type:
link_type_string = key
if link_type in (gw2api.TYPE_TEXT, gw2api.TYPE_MAP, gw2api.TYPE_SKILL,
gw2api.TYPE_TRAIT, gw2api.TYPE_RECIPE,
gw2api.TYPE_SKIN, gw2api.TYPE_OUTFIT):
id, = unpack("<I", data[1:])
return link_type_string, {"id": id}
if link_type == gw2api.TYPE_OBJECTIVE:
objective_id, map_id = unpack("<II", data[1:])
return link_type_string, {"objective_id": objective_id,
"map_id": map_id}
raise Exception("Unknown link type 0x%02x" % link_type)
|
hackedd/gw2api
|
gw2api/items.py
|
item_details
|
python
|
def item_details(item_id, lang="en"):
params = {"item_id": item_id, "lang": lang}
cache_name = "item_details.%(item_id)s.%(lang)s.json" % params
return get_cached("item_details.json", cache_name, params=params)
|
This resource returns a details about a single item.
:param item_id: The item to query for.
:param lang: The language to display the texts in.
The response is an object with at least the following properties. Note that
the availability of some properties depends on the type of the item.
item_id (number):
The item id.
name (string):
The name of the item.
description (string):
The item description.
type (string):
The item type.
level (integer):
The required level.
rarity (string):
The rarity. On of ``Junk``, ``Basic``, ``Fine``, ``Masterwork``,
``Rare``, ``Exotic``, ``Ascended`` or ``Legendary``.
vendor_value (integer):
The value in coins when selling to a vendor.
icon_file_id (string):
The icon file id to be used with the render service.
icon_file_signature (string):
The icon file signature to be used with the render service.
game_types (list):
The game types where the item is usable.
Currently known game types are: ``Activity``, ``Dungeon``, ``Pve``,
``Pvp``, ``PvpLobby`` and ``WvW``
flags (list):
Additional item flags.
Currently known item flags are: ``AccountBound``, ``HideSuffix``,
``NoMysticForge``, ``NoSalvage``, ``NoSell``, ``NotUpgradeable``,
``NoUnderwater``, ``SoulbindOnAcquire``, ``SoulBindOnUse`` and
``Unique``
restrictions (list):
Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and
``Sylvari``.
Each item type has an `additional key`_ with information specific to that
item type.
.. _additional key: item-properties.html
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/items.py#L24-L85
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("items", "recipes", "item_details", "recipe_details")
def items():
    """This resource returns a list of items that were discovered by
    players in the game. Details about a single item can be obtained
    using the :func:`item_details` resource.
    """
    response = get_cached("items.json")
    return response.get("items")
def recipes():
    """This resource returns a list of recipes that were discovered by
    players in the game. Details about a single recipe can be obtained
    using the :func:`recipe_details` resource.
    """
    response = get_cached("recipes.json")
    return response.get("recipes")
def recipe_details(recipe_id, lang="en"):
    """This resource returns details about a single recipe.

    :param recipe_id: The recipe to query for.
    :param lang: The language to display the texts in.

    The response is an object with the following properties:

    recipe_id (number):
        The recipe id.
    type (string):
        The type of the produced item.
    output_item_id (string):
        The item id of the produced item.
    output_item_count (string):
        The amount of items produced.
    min_rating (string):
        The minimum rating of the recipe.
    time_to_craft_ms (string):
        The time it takes to craft the item.
    disciplines (list):
        A list of crafting disciplines that can use the recipe.
    flags (list):
        Additional recipe flags. Known flags:

        ``AutoLearned``:
            Set for recipes that don't have to be discovered.
        ``LearnedFromItem``:
            Set for recipes that need a recipe sheet.
    ingredients (list):
        A list of objects describing the ingredients for this recipe.
        Each object contains the following properties:

        item_id (string):
            The item id of the ingredient.
        count (string):
            The amount of ingredients required.
    """
    cache_name = "recipe_details.%s.%s.json" % (recipe_id, lang)
    return get_cached("recipe_details.json", cache_name,
                      params={"recipe_id": recipe_id, "lang": lang})
|
hackedd/gw2api
|
gw2api/items.py
|
recipe_details
|
python
|
def recipe_details(recipe_id, lang="en"):
params = {"recipe_id": recipe_id, "lang": lang}
cache_name = "recipe_details.%(recipe_id)s.%(lang)s.json" % params
return get_cached("recipe_details.json", cache_name, params=params)
|
This resource returns a details about a single recipe.
:param recipe_id: The recipe to query for.
:param lang: The language to display the texts in.
The response is an object with the following properties:
recipe_id (number):
The recipe id.
type (string):
The type of the produced item.
output_item_id (string):
The item id of the produced item.
output_item_count (string):
The amount of items produced.
min_rating (string):
The minimum rating of the recipe.
time_to_craft_ms (string):
The time it takes to craft the item.
disciplines (list):
A list of crafting disciplines that can use the recipe.
flags (list):
Additional recipe flags. Known flags:
``AutoLearned``:
Set for recipes that don't have to be discovered.
``LearnedFromItem``:
Set for recipes that need a recipe sheet.
ingredients (list):
A list of objects describing the ingredients for this recipe. Each
object contains the following properties:
item_id (string):
The item id of the ingredient.
count (string):
The amount of ingredients required.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/items.py#L88-L139
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("items", "recipes", "item_details", "recipe_details")
def items():
"""This resource returns a list of items that were discovered by players
in the game. Details about a single item can be obtained using the
:func:`item_details` resource.
"""
return get_cached("items.json").get("items")
def recipes():
"""This resource returns a list of recipes that were discovered by players
in the game. Details about a single recipe can be obtained using the
:func:`recipe_details` resource.
"""
return get_cached("recipes.json").get("recipes")
def item_details(item_id, lang="en"):
    """This resource returns details about a single item.

    :param item_id: The item to query for.
    :param lang: The language to display the texts in.

    The response is an object with at least the following properties.
    Note that the availability of some properties depends on the type
    of the item.

    item_id (number):
        The item id.
    name (string):
        The name of the item.
    description (string):
        The item description.
    type (string):
        The item type.
    level (integer):
        The required level.
    rarity (string):
        The rarity. One of ``Junk``, ``Basic``, ``Fine``, ``Masterwork``,
        ``Rare``, ``Exotic``, ``Ascended`` or ``Legendary``.
    vendor_value (integer):
        The value in coins when selling to a vendor.
    icon_file_id (string):
        The icon file id to be used with the render service.
    icon_file_signature (string):
        The icon file signature to be used with the render service.
    game_types (list):
        The game types where the item is usable.
        Currently known game types are: ``Activity``, ``Dungeon``,
        ``Pve``, ``Pvp``, ``PvpLobby`` and ``WvW``
    flags (list):
        Additional item flags.
        Currently known item flags are: ``AccountBound``, ``HideSuffix``,
        ``NoMysticForge``, ``NoSalvage``, ``NoSell``, ``NotUpgradeable``,
        ``NoUnderwater``, ``SoulbindOnAcquire``, ``SoulBindOnUse`` and
        ``Unique``
    restrictions (list):
        Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and
        ``Sylvari``.

    Each item type has an `additional key`_ with information specific to
    that item type.

    .. _additional key: item-properties.html
    """
    cache_name = "item_details.%s.%s.json" % (item_id, lang)
    return get_cached("item_details.json", cache_name,
                      params={"item_id": item_id, "lang": lang})
|
hackedd/gw2api
|
gw2api/misc.py
|
colors
|
python
|
def colors(lang="en"):
    """Return all dyes in the game, with names localized to *lang*."""
    cache_name = "colors.%s.json" % lang
    response = get_cached("colors.json", cache_name, params={"lang": lang})
    return response["colors"]
|
This resource returns all dyes in the game, including localized names
and their color component information.
:param lang: The language to query the names for.
The response is a dictionary where color ids are mapped to an dictionary
containing the following properties:
name (string):
The name of the dye.
base_rgb (list):
The base RGB values.
cloth (object):
Detailed information on its appearance when applied on cloth armor.
leather (object):
Detailed information on its appearance when applied on leather armor.
metal (object):
Detailed information on its appearance when applied on metal armor.
The detailed information object contains the following properties:
brightness (number):
The brightness.
contrast (number):
The contrast.
hue (number):
The hue in the HSL colorspace.
saturation (number):
The saturation in the HSL colorspace.
lightness (number):
The lightness in the HSL colorspace.
rgb (list):
A list containing precalculated RGB values.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/misc.py#L15-L62
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("build", "colors", "files")
def build():
    """This resource returns the current build id of the game.

    The result of this function is not cached.
    """
    # cache_name=False bypasses the on-disk cache entirely.
    response = get_cached("build.json", False)
    return response.get("build_id")
def files():
    """Return commonly requested in-game assets that may be used to
    enhance API-derived applications. The returned information can be
    used with the render service to retrieve assets.

    The response is an object where file identifiers are mapped to an
    object containing the following properties:

    file_id (string):
        The file id to be used with the render service.
    signature (string):
        The file signature to be used with the render service.
    """
    return get_cached("files.json")
|
hackedd/gw2api
|
gw2api/events.py
|
event_names
|
python
|
def event_names(lang="en"):
    """Map event ids to their localized names for *lang*."""
    cache_name = "event_names.%s.json" % lang
    events = get_cached("event_names.json", cache_name,
                        params={"lang": lang})
    return {event["id"]: event["name"] for event in events}
|
This resource returns an unordered list of the localized event names
for the specified language.
:param lang: The language to query the names for.
:return: A dictionary where the key is the event id and the value is the
name of the event in the specified language.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/events.py#L7-L18
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("event_names", "event_details")
def event_details(event_id=None, lang="en"):
    """This resource returns static details about available events.

    :param event_id: Only list this event.
    :param lang: Show localized texts in the specified language.

    The response is a dictionary where the key is the event id, and the
    value is a dictionary containing the following properties:

    name (string)
        The name of the event.
    level (int)
        The event level.
    map_id (int)
        The map where the event takes place.
    flags (list)
        A list of additional flags. Possible flags are:

        ``group_event``
            For group events
        ``map_wide``
            For map-wide events.
    location (object)
        The location of the event.

        type (string)
            The type of the event location, can be ``sphere``,
            ``cylinder`` or ``poly``.
        center (list)
            X, Y, Z coordinates of the event location.
        radius (number) (type ``sphere`` and ``cylinder``)
            Radius of the event location.
        z_range (list) (type ``poly``)
            List of Minimum and Maximum Z coordinate.
        points (list) (type ``poly``)
            List of Points (X, Y) denoting the event location perimeter.

    If a event_id is given, only the values for that event are returned.
    """
    params = {"lang": lang}
    if event_id:
        params["event_id"] = event_id
        cache_name = "event_details.%s.%s.json" % (event_id, lang)
    else:
        cache_name = "event_details.%s.json" % lang
    events = get_cached("event_details.json", cache_name,
                        params=params)["events"]
    return events.get(event_id) if event_id else events
|
hackedd/gw2api
|
gw2api/events.py
|
event_details
|
python
|
def event_details(event_id=None, lang="en"):
if event_id:
cache_name = "event_details.%s.%s.json" % (event_id, lang)
params = {"event_id": event_id, "lang": lang}
else:
cache_name = "event_details.%s.json" % lang
params = {"lang": lang}
data = get_cached("event_details.json", cache_name, params=params)
events = data["events"]
return events.get(event_id) if event_id else events
|
This resource returns static details about available events.
:param event_id: Only list this event.
:param lang: Show localized texts in the specified language.
The response is a dictionary where the key is the event id, and the value
is a dictionary containing the following properties:
name (string)
The name of the event.
level (int)
The event level.
map_id (int)
The map where the event takes place.
flags (list)
A list of additional flags. Possible flags are:
``group_event``
For group events
``map_wide``
For map-wide events.
location (object)
The location of the event.
type (string)
The type of the event location, can be ``sphere``, ``cylinder`` or
``poly``.
center (list)
X, Y, Z coordinates of the event location.
radius (number) (type ``sphere`` and ``cylinder``)
Radius of the event location.
z_range (list) (type ``poly``)
List of Minimum and Maximum Z coordinate.
points (list) (type ``poly``)
List of Points (X, Y) denoting the event location perimeter.
If a event_id is given, only the values for that event are returned.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/events.py#L21-L79
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("event_names", "event_details")
def event_names(lang="en"):
    """This resource returns an unordered list of the localized event
    names for the specified language.

    :param lang: The language to query the names for.
    :return: A dictionary where the key is the event id and the value is
             the name of the event in the specified language.
    """
    cache_name = "event_names.%s.json" % lang
    events = get_cached("event_names.json", cache_name,
                        params={"lang": lang})
    return {event["id"]: event["name"] for event in events}
|
hackedd/gw2api
|
gw2api/guild.py
|
guild_details
|
python
|
def guild_details(guild_id=None, name=None):
    """Return details about a guild.

    Only one parameter is required; if both are set, the guild id takes
    precedence and a warning is issued.

    :param guild_id: The guild id to query for.
    :param name: The guild name to query for.
    :raises ValueError: if neither ``guild_id`` nor ``name`` is given.
    """
    if guild_id and name:
        warnings.warn("both guild_id and name are specified, "
                      "name will be ignored")
    if guild_id:
        params = {"guild_id": guild_id}
        cache_name = "guild_details.%s.json" % guild_id
    elif name:
        params = {"guild_name": name}
        cache_name = "guild_details.%s.json" % name
    else:
        # ValueError instead of a bare Exception: more precise, and still a
        # subclass of Exception, so existing callers keep working.
        raise ValueError("specify either guild_id or name")
    return get_cached("guild_details.json", cache_name, params=params)
|
This resource returns details about a guild.
:param guild_id: The guild id to query for.
:param name: The guild name to query for.
*Note: Only one parameter is required; if both are set, the guild Id takes
precedence and a warning will be logged.*
The response is a dictionary with the following keys:
guild_id (string):
The guild id.
guild_name (string):
The guild name.
tag (string):
The guild tag.
emblem (object):
If present, it holds detailed information about the guilds emblem.
The emblem dictionary contains the following information:
background_id (number):
The id of the background image.
foreground_id (number):
The id of the foreground image.
flags (list):
A list of additional flags, possible values are:
``FlipBackgroundHorizontal``, ``FlipBackgroundVertical``,
``FlipForegroundHorizontal`` and ``FlipForegroundVertical``.
background_color_id (number):
The background color id.
foreground_primary_color_id (number):
The primary foreground color id.
foreground_secondary_color_id (number):
The secondary foreground color id.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/guild.py#L9-L68
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
import warnings
from .util import get_cached
__all__ = ("guild_details", )
|
hackedd/gw2api
|
gw2api/map.py
|
map_names
|
python
|
def map_names(lang="en"):
    """Return the localized map names (only maps with events are listed).

    :param lang: The language to query the names for.
    :return: A dictionary mapping each map id to the localized map name.
    """
    cache_file = "map_names.%s.json" % lang
    payload = get_cached("map_names.json", cache_file, params={"lang": lang})
    return {entry["id"]: entry["name"] for entry in payload}
|
This resource returns a dictionary of the localized map names for
the specified language. Only maps with events are listed - if you need a
list of all maps, use ``maps.json`` instead.
:param lang: The language to query the names for.
:return: the response is a dictionary where the key is the map id and the
value is the name of the map in the specified language.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/map.py#L35-L47
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("continents", "map_names", "maps", "map_floor")
def continents():
    """Return static information about the continents used with the
    map_floor resource.

    The response is a dictionary keyed by continent id; each value holds:

    name (string):
        The name of the continent.
    continent_dims (dimension):
        The width and height dimensions of the continent.
    min_zoom (number):
        The minimal zoom level for use with the map tile service.
    max_zoom (number):
        The maximum zoom level for use with the map tile service.
    floors (list):
        A list of floors available for this continent.

    *Note: There are only two continents, Tyria and Mists.*
    """
    payload = get_cached("continents.json")
    return payload.get("continents")
def maps(map_id=None, lang="en"):
"""This resource returns details about maps in the game, including details
about floor and translation data on how to translate between world
coordinates and map coordinates.
:param map_id: Only list this map.
:param lang: Show localized texts in the specified language.
The response is a dictionary where the key is the map id and the value is
a dictionary containing the following properties:
map_name (string)
The map name.
min_level (number)
The minimal level of this map.
max_level (number)
The maximum level of this map.
default_floor (number)
The default floor of this map.
floors (list)
A list of available floors for this map.
region_id (number)
The id of the region this map belongs to.
region_name (string)
The name of the region this map belongs to.
continent_id (number)
The id of the continent this map belongs to.
continent_name (string)
The name of the continent this map belongs to.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
If a map_id is given, only the values for that map are returned.
"""
if map_id:
cache_name = "maps.%s.%s.json" % (map_id, lang)
params = {"map_id": map_id, "lang": lang}
else:
cache_name = "maps.%s.json" % lang
params = {"lang": lang}
data = get_cached("maps.json", cache_name, params=params).get("maps")
return data.get(str(map_id)) if map_id else data
def map_floor(continent_id, floor, lang="en"):
"""This resource returns details about a map floor, used to populate a
world map. All coordinates are map coordinates.
The returned data only contains static content. Dynamic content, such as
vendors, is not currently available.
:param continent_id: The continent.
:param floor: The map floor.
:param lang: Show localized texts in the specified language.
The response is an object with the following properties:
texture_dims (dimension)
The dimensions of the texture.
clamped_view (rect)
If present, it represents a rectangle of downloadable textures. Every
tile coordinate outside this rectangle is not available on the tile
server.
regions (object)
A mapping from region id to an object.
Each region object contains the following properties:
name (string)
The region name.
label_coord (coordinate)
The coordinates of the region label.
maps (object)
A mapping from the map id to an object.
Each map object contains the following properties:
name (string)
The map name.
min_level (number)
The minimum level of the map.
max_level (number)
The maximum level of the map.
default_floor (number)
The default floor of the map.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
points_of_interest (list)
A list of points of interest (landmarks, waypoints and vistas)
Each points of interest object contains the following properties:
poi_id (number)
The point of interest id.
name (string)
The name of the point of interest.
type (string)
The type. This can be either "landmark" for actual points of
interest, "waypoint" for waypoints, or "vista" for vistas.
floor (number)
The floor of this object.
coord (coordinate)
The coordinates of this object.
tasks (list)
A list of renown hearts.
Each task object contains the following properties:
task_id (number)
The renown heart id.
objective (string)
The objective or name of the heart.
level (number)
The level of the heart.
coord (coordinate)
The coordinates where it takes place.
skill_challenges (list)
A list of skill challenges.
Each skill challenge object contains the following properties:
coord (coordinate)
The coordinates of this skill challenge.
sectors (list)
A list of areas within the map.
Each sector object contains the following properties:
sector_id (number)
The area id.
name (string)
The name of the area.
level (number)
The level of the area.
coord (coordinate)
The coordinates of this area (this is usually the center
position).
Special types:
Dimension properties are two-element lists of width and height.
Coordinate properties are two-element lists of the x and y position.
Rect properties are two-element lists of coordinates of the upper-left and
lower-right coordinates.
"""
cache_name = "map_floor.%s-%s.%s.json" % (continent_id, floor, lang)
params = {"continent_id": continent_id, "floor": floor, "lang": lang}
return get_cached("map_floor.json", cache_name, params=params)
|
hackedd/gw2api
|
gw2api/map.py
|
maps
|
python
|
def maps(map_id=None, lang="en"):
    """Return details about maps in the game.

    :param map_id: Only list this map.
    :param lang: Show localized texts in the specified language.
    :return: a dictionary keyed by map id, or just the single map's
             entry when ``map_id`` is given.
    """
    if map_id:
        params = {"map_id": map_id, "lang": lang}
        cache_file = "maps.%s.%s.json" % (map_id, lang)
    else:
        params = {"lang": lang}
        cache_file = "maps.%s.json" % lang
    data = get_cached("maps.json", cache_file, params=params).get("maps")
    if map_id:
        return data.get(str(map_id))
    return data
|
This resource returns details about maps in the game, including details
about floor and translation data on how to translate between world
coordinates and map coordinates.
:param map_id: Only list this map.
:param lang: Show localized texts in the specified language.
The response is a dictionary where the key is the map id and the value is
a dictionary containing the following properties:
map_name (string)
The map name.
min_level (number)
The minimal level of this map.
max_level (number)
The maximum level of this map.
default_floor (number)
The default floor of this map.
floors (list)
A list of available floors for this map.
region_id (number)
The id of the region this map belongs to.
region_name (string)
The name of the region this map belongs to.
continent_id (number)
The id of the continent this map belongs to.
continent_name (string)
The name of the continent this map belongs to.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
If a map_id is given, only the values for that map are returned.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/map.py#L50-L105
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("continents", "map_names", "maps", "map_floor")
def continents():
"""This resource returns static information about the continents used with
the map_floor resource.
The response is a dictionary where the key is the continent id, and the
value is a dictionary containing the following properties:
name (string):
The name of the continent.
continent_dims (dimension):
The width and height dimensions of the continent.
min_zoom (number):
The minimal zoom level for use with the map tile service.
max_zoom (number):
The maximum zoom level for use with the map tile service.
floors (list):
A list of floors available for this continent.
*Note: There are only two continents, Tyria and Mists.*
"""
return get_cached("continents.json").get("continents")
def map_names(lang="en"):
"""This resource returns an dictionary of the localized map names for
the specified language. Only maps with events are listed - if you need a
list of all maps, use ``maps.json`` instead.
:param lang: The language to query the names for.
:return: the response is a dictionary where the key is the map id and the
value is the name of the map in the specified language.
"""
cache_name = "map_names.%s.json" % lang
data = get_cached("map_names.json", cache_name, params=dict(lang=lang))
return dict([(item["id"], item["name"]) for item in data])
def map_floor(continent_id, floor, lang="en"):
"""This resource returns details about a map floor, used to populate a
world map. All coordinates are map coordinates.
The returned data only contains static content. Dynamic content, such as
vendors, is not currently available.
:param continent_id: The continent.
:param floor: The map floor.
:param lang: Show localized texts in the specified language.
The response is an object with the following properties:
texture_dims (dimension)
The dimensions of the texture.
clamped_view (rect)
If present, it represents a rectangle of downloadable textures. Every
tile coordinate outside this rectangle is not available on the tile
server.
regions (object)
A mapping from region id to an object.
Each region object contains the following properties:
name (string)
The region name.
label_coord (coordinate)
The coordinates of the region label.
maps (object)
A mapping from the map id to an object.
Each map object contains the following properties:
name (string)
The map name.
min_level (number)
The minimum level of the map.
max_level (number)
The maximum level of the map.
default_floor (number)
The default floor of the map.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
points_of_interest (list)
A list of points of interest (landmarks, waypoints and vistas)
Each points of interest object contains the following properties:
poi_id (number)
The point of interest id.
name (string)
The name of the point of interest.
type (string)
The type. This can be either "landmark" for actual points of
interest, "waypoint" for waypoints, or "vista" for vistas.
floor (number)
The floor of this object.
coord (coordinate)
The coordinates of this object.
tasks (list)
A list of renown hearts.
Each task object contains the following properties:
task_id (number)
The renown heart id.
objective (string)
The objective or name of the heart.
level (number)
The level of the heart.
coord (coordinate)
The coordinates where it takes place.
skill_challenges (list)
A list of skill challenges.
Each skill challenge object contains the following properties:
coord (coordinate)
The coordinates of this skill challenge.
sectors (list)
A list of areas within the map.
Each sector object contains the following properties:
sector_id (number)
The area id.
name (string)
The name of the area.
level (number)
The level of the area.
coord (coordinate)
The coordinates of this area (this is usually the center
position).
Special types:
Dimension properties are two-element lists of width and height.
Coordinate properties are two-element lists of the x and y position.
Rect properties are two-element lists of coordinates of the upper-left and
lower-right coordinates.
"""
cache_name = "map_floor.%s-%s.%s.json" % (continent_id, floor, lang)
params = {"continent_id": continent_id, "floor": floor, "lang": lang}
return get_cached("map_floor.json", cache_name, params=params)
|
hackedd/gw2api
|
gw2api/map.py
|
map_floor
|
python
|
def map_floor(continent_id, floor, lang="en"):
    """Return details about a map floor, used to populate a world map.

    :param continent_id: The continent.
    :param floor: The map floor.
    :param lang: Show localized texts in the specified language.
    """
    params = {"continent_id": continent_id, "floor": floor, "lang": lang}
    cache_file = "map_floor.%s-%s.%s.json" % (continent_id, floor, lang)
    return get_cached("map_floor.json", cache_file, params=params)
|
This resource returns details about a map floor, used to populate a
world map. All coordinates are map coordinates.
The returned data only contains static content. Dynamic content, such as
vendors, is not currently available.
:param continent_id: The continent.
:param floor: The map floor.
:param lang: Show localized texts in the specified language.
The response is an object with the following properties:
texture_dims (dimension)
The dimensions of the texture.
clamped_view (rect)
If present, it represents a rectangle of downloadable textures. Every
tile coordinate outside this rectangle is not available on the tile
server.
regions (object)
A mapping from region id to an object.
Each region object contains the following properties:
name (string)
The region name.
label_coord (coordinate)
The coordinates of the region label.
maps (object)
A mapping from the map id to an object.
Each map object contains the following properties:
name (string)
The map name.
min_level (number)
The minimum level of the map.
max_level (number)
The maximum level of the map.
default_floor (number)
The default floor of the map.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
points_of_interest (list)
A list of points of interest (landmarks, waypoints and vistas)
Each points of interest object contains the following properties:
poi_id (number)
The point of interest id.
name (string)
The name of the point of interest.
type (string)
The type. This can be either "landmark" for actual points of
interest, "waypoint" for waypoints, or "vista" for vistas.
floor (number)
The floor of this object.
coord (coordinate)
The coordinates of this object.
tasks (list)
A list of renown hearts.
Each task object contains the following properties:
task_id (number)
The renown heart id.
objective (string)
The objective or name of the heart.
level (number)
The level of the heart.
coord (coordinate)
The coordinates where it takes place.
skill_challenges (list)
A list of skill challenges.
Each skill challenge object contains the following properties:
coord (coordinate)
The coordinates of this skill challenge.
sectors (list)
A list of areas within the map.
Each sector object contains the following properties:
sector_id (number)
The area id.
name (string)
The name of the area.
level (number)
The level of the area.
coord (coordinate)
The coordinates of this area (this is usually the center
position).
Special types:
Dimension properties are two-element lists of width and height.
Coordinate properties are two-element lists of the x and y position.
Rect properties are two-element lists of coordinates of the upper-left and
lower-right coordinates.
|
train
|
https://github.com/hackedd/gw2api/blob/5543a78e6e3ed0573b7e84c142c44004b4779eac/gw2api/map.py#L108-L236
|
[
"def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data\n"
] |
from .util import get_cached
__all__ = ("continents", "map_names", "maps", "map_floor")
def continents():
"""This resource returns static information about the continents used with
the map_floor resource.
The response is a dictionary where the key is the continent id, and the
value is a dictionary containing the following properties:
name (string):
The name of the continent.
continent_dims (dimension):
The width and height dimensions of the continent.
min_zoom (number):
The minimal zoom level for use with the map tile service.
max_zoom (number):
The maximum zoom level for use with the map tile service.
floors (list):
A list of floors available for this continent.
*Note: There are only two continents, Tyria and Mists.*
"""
return get_cached("continents.json").get("continents")
def map_names(lang="en"):
"""This resource returns an dictionary of the localized map names for
the specified language. Only maps with events are listed - if you need a
list of all maps, use ``maps.json`` instead.
:param lang: The language to query the names for.
:return: the response is a dictionary where the key is the map id and the
value is the name of the map in the specified language.
"""
cache_name = "map_names.%s.json" % lang
data = get_cached("map_names.json", cache_name, params=dict(lang=lang))
return dict([(item["id"], item["name"]) for item in data])
def maps(map_id=None, lang="en"):
"""This resource returns details about maps in the game, including details
about floor and translation data on how to translate between world
coordinates and map coordinates.
:param map_id: Only list this map.
:param lang: Show localized texts in the specified language.
The response is a dictionary where the key is the map id and the value is
a dictionary containing the following properties:
map_name (string)
The map name.
min_level (number)
The minimal level of this map.
max_level (number)
The maximum level of this map.
default_floor (number)
The default floor of this map.
floors (list)
A list of available floors for this map.
region_id (number)
The id of the region this map belongs to.
region_name (string)
The name of the region this map belongs to.
continent_id (number)
The id of the continent this map belongs to.
continent_name (string)
The name of the continent this map belongs to.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
If a map_id is given, only the values for that map are returned.
"""
if map_id:
cache_name = "maps.%s.%s.json" % (map_id, lang)
params = {"map_id": map_id, "lang": lang}
else:
cache_name = "maps.%s.json" % lang
params = {"lang": lang}
data = get_cached("maps.json", cache_name, params=params).get("maps")
return data.get(str(map_id)) if map_id else data
|
NickMonzillo/SmartCloud
|
SmartCloud/wordplay.py
|
separate
|
python
|
def separate(text):
    '''Takes text and separates it into a list of words'''
    # Keep only ASCII letters; tokens left empty after stripping are dropped.
    letters = 'abcdefghijklmnopqrstuvwxyz'
    letters = letters + letters.upper()
    cleaned = []
    for token in text.split():
        kept = ''
        for ch in token:
            if ch in letters:
                kept += ch
        if kept:
            cleaned.append(kept)
    return map(lambda w: w.lower(), cleaned)
|
Takes text and separates it into a list of words
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/wordplay.py#L1-L13
| null |
def read_file(filename):
    '''Reads in a .txt file.'''
    handle = open(filename, 'r')
    try:
        return handle.read()
    finally:
        handle.close()
def eliminate_repeats(text):
'''Returns a list of words that occur in the text. Eliminates stopwords.'''
bannedwords = read_file('stopwords.txt')
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr not in standardwords and newstr != '' and newstr not in bannedwords:
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def wordcount(text):
'''Returns the count of the words in a file.'''
bannedwords = read_file('stopwords.txt')
wordcount = {}
separated = separate(text)
for word in separated:
if word not in bannedwords:
if not wordcount.has_key(word):
wordcount[word] = 1
else:
wordcount[word] += 1
return wordcount
def tuplecount(text):
'''Changes a dictionary into a list of tuples.'''
worddict = wordcount(text)
countlist = []
for key in worddict.keys():
countlist.append((key,worddict[key]))
countlist = list(reversed(sorted(countlist,key = lambda x: x[1])))
return countlist
|
NickMonzillo/SmartCloud
|
SmartCloud/wordplay.py
|
eliminate_repeats
|
python
|
def eliminate_repeats(text):
    '''Returns a list of words that occur in the text. Eliminates stopwords.'''
    # NOTE(review): read_file returns the raw file text, so the
    # `not in stopwords` check below is a *substring* test, not a
    # word-list membership test — preserved as-is from the original.
    stopwords = read_file('stopwords.txt')
    letters = 'abcdefghijklmnopqrstuvwxyz'
    letters = letters + letters.upper()
    unique = []
    for token in text.split():
        stripped = ''
        for ch in token:
            if ch in letters:
                stripped += ch
        if stripped and stripped not in unique and stripped not in stopwords:
            unique.append(stripped)
    return map(lambda w: w.lower(), unique)
|
Returns a list of words that occur in the text. Eliminates stopwords.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/wordplay.py#L21-L36
|
[
"def read_file(filename):\n '''Reads in a .txt file.'''\n with open(filename,'r') as f:\n content = f.read()\n return content\n"
] |
def separate(text):
'''Takes text and separates it into a list of words'''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr != '':
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def read_file(filename):
'''Reads in a .txt file.'''
with open(filename,'r') as f:
content = f.read()
return content
def eliminate_repeats(text):
'''Returns a list of words that occur in the text. Eliminates stopwords.'''
bannedwords = read_file('stopwords.txt')
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr not in standardwords and newstr != '' and newstr not in bannedwords:
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def wordcount(text):
'''Returns the count of the words in a file.'''
bannedwords = read_file('stopwords.txt')
wordcount = {}
separated = separate(text)
for word in separated:
if word not in bannedwords:
if not wordcount.has_key(word):
wordcount[word] = 1
else:
wordcount[word] += 1
return wordcount
def tuplecount(text):
'''Changes a dictionary into a list of tuples.'''
worddict = wordcount(text)
countlist = []
for key in worddict.keys():
countlist.append((key,worddict[key]))
countlist = list(reversed(sorted(countlist,key = lambda x: x[1])))
return countlist
|
NickMonzillo/SmartCloud
|
SmartCloud/wordplay.py
|
wordcount
|
python
|
def wordcount(text):
    '''Returns the count of the words in a file.

    Splits *text* into normalized words via separate() and tallies each
    word that does not appear in stopwords.txt.
    '''
    # NOTE(review): bannedwords is the raw stopword file text, so the
    # membership test below is a substring check (original behavior kept).
    bannedwords = read_file('stopwords.txt')
    counts = {}
    for word in separate(text):
        if word not in bannedwords:
            # dict.get replaces dict.has_key, which was removed in Python 3.
            counts[word] = counts.get(word, 0) + 1
    return counts
|
Returns the count of the words in a file.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/wordplay.py#L38-L49
|
[
"def separate(text):\n '''Takes text and separates it into a list of words'''\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n words = text.split()\n standardwords = []\n for word in words:\n newstr = ''\n for char in word:\n if char in alphabet or char in alphabet.upper():\n newstr += char\n if newstr != '':\n standardwords.append(newstr)\n return map(lambda x: x.lower(),standardwords)\n",
"def read_file(filename):\n '''Reads in a .txt file.'''\n with open(filename,'r') as f:\n content = f.read()\n return content\n"
] |
def separate(text):
'''Takes text and separates it into a list of words'''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr != '':
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def read_file(filename):
'''Reads in a .txt file.'''
with open(filename,'r') as f:
content = f.read()
return content
def eliminate_repeats(text):
'''Returns a list of words that occur in the text. Eliminates stopwords.'''
bannedwords = read_file('stopwords.txt')
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr not in standardwords and newstr != '' and newstr not in bannedwords:
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def tuplecount(text):
'''Changes a dictionary into a list of tuples.'''
worddict = wordcount(text)
countlist = []
for key in worddict.keys():
countlist.append((key,worddict[key]))
countlist = list(reversed(sorted(countlist,key = lambda x: x[1])))
return countlist
|
NickMonzillo/SmartCloud
|
SmartCloud/wordplay.py
|
tuplecount
|
python
|
def tuplecount(text):
    '''Changes a dictionary into a list of tuples.'''
    counts = wordcount(text)
    pairs = [(word, counts[word]) for word in counts.keys()]
    # reversed(sorted(...)) rather than sorted(reverse=True) to keep the
    # original tie ordering byte-for-byte identical.
    return list(reversed(sorted(pairs, key=lambda item: item[1])))
|
Changes a dictionary into a list of tuples.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/wordplay.py#L51-L58
|
[
"def wordcount(text):\n '''Returns the count of the words in a file.'''\n bannedwords = read_file('stopwords.txt')\n wordcount = {}\n separated = separate(text)\n for word in separated:\n if word not in bannedwords:\n if not wordcount.has_key(word):\n wordcount[word] = 1\n else:\n wordcount[word] += 1\n return wordcount\n"
] |
def separate(text):
'''Takes text and separates it into a list of words'''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr != '':
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def read_file(filename):
'''Reads in a .txt file.'''
with open(filename,'r') as f:
content = f.read()
return content
def eliminate_repeats(text):
'''Returns a list of words that occur in the text. Eliminates stopwords.'''
bannedwords = read_file('stopwords.txt')
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr not in standardwords and newstr != '' and newstr not in bannedwords:
standardwords.append(newstr)
return map(lambda x: x.lower(),standardwords)
def wordcount(text):
'''Returns the count of the words in a file.'''
bannedwords = read_file('stopwords.txt')
wordcount = {}
separated = separate(text)
for word in separated:
if word not in bannedwords:
if not wordcount.has_key(word):
wordcount[word] = 1
else:
wordcount[word] += 1
return wordcount
|
NickMonzillo/SmartCloud
|
SmartCloud/__init__.py
|
Cloud.render_word
|
python
|
def render_word(self,word,size,color):
'''Creates a surface that contains a word.'''
pygame.font.init()
font = pygame.font.Font(None,size)
self.rendered_word = font.render(word,0,color)
self.word_size = font.size(word)
|
Creates a surface that contains a word.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L16-L21
| null |
class Cloud(object):
def __init__(self,width=500,height=500):
pygame.init()
pygame.font.init()
self.width = width
self.height = height
self.cloud = pygame.Surface((width,height))
self.used_pos = []
def plot_word(self,position):
'''Blits a rendered word on to the main display surface'''
posrectangle = pygame.Rect(position,self.word_size)
self.used_pos.append(posrectangle)
self.cloud.blit(self.rendered_word,position)
def collides(self,position,size):
'''Returns True if the word collides with another plotted word.'''
word_rect = pygame.Rect(position,self.word_size)
if word_rect.collidelistall(self.used_pos) == []:
return False
else:
return True
def expand(self,delta_width,delta_height):
'''Makes the cloud surface bigger. Maintains all word positions.'''
temp_surface = pygame.Surface((self.width + delta_width,self.height + delta_height))
(self.width,self.height) = (self.width + delta_width, self.height + delta_height)
temp_surface.blit(self.cloud,(0,0))
self.cloud = temp_surface
def smart_cloud(self,input,max_text_size=72,min_text_size=12,exclude_words = True):
'''Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to true if you want to eliminate words that only occur once.'''
self.exclude_words = exclude_words
if isdir(input):
self.directory_cloud(input,max_text_size,min_text_size)
elif isfile(input):
text = read_file(input)
self.text_cloud(text,max_text_size,min_text_size)
elif isinstance(input, basestring):
self.text_cloud(input,max_text_size,min_text_size)
else:
print 'Input type not supported.'
print 'Supported types: String, Directory, .txt file'
def directory_cloud(self,directory,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using files from a directory.
The color of the words correspond to the amount of documents the word occurs in.'''
worddict = assign_fonts(tuplecount(read_dir(directory)),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
colordict = assign_colors(dir_freq(directory))
num_words = 0
for word in sorted_worddict:
self.render_word(word,worddict[word],colordict[word])
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
#the initial position is determined
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, create a bigger cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
num_words += 1
def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using plain text.'''
worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
for word in sorted_worddict:
self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255)))
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, expand the cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
def display(self):
'''Displays the word cloud to the screen.'''
pygame.init()
self.display = pygame.display.set_mode((self.width,self.height))
self.display.blit(self.cloud,(0,0))
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
def save(self,filename):
'''Saves the cloud to a file.'''
pygame.image.save(self.cloud,filename)
|
NickMonzillo/SmartCloud
|
SmartCloud/__init__.py
|
Cloud.plot_word
|
python
|
def plot_word(self,position):
'''Blits a rendered word on to the main display surface'''
posrectangle = pygame.Rect(position,self.word_size)
self.used_pos.append(posrectangle)
self.cloud.blit(self.rendered_word,position)
|
Blits a rendered word on to the main display surface
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L23-L27
| null |
class Cloud(object):
def __init__(self,width=500,height=500):
pygame.init()
pygame.font.init()
self.width = width
self.height = height
self.cloud = pygame.Surface((width,height))
self.used_pos = []
def render_word(self,word,size,color):
'''Creates a surface that contains a word.'''
pygame.font.init()
font = pygame.font.Font(None,size)
self.rendered_word = font.render(word,0,color)
self.word_size = font.size(word)
def collides(self,position,size):
'''Returns True if the word collides with another plotted word.'''
word_rect = pygame.Rect(position,self.word_size)
if word_rect.collidelistall(self.used_pos) == []:
return False
else:
return True
def expand(self,delta_width,delta_height):
'''Makes the cloud surface bigger. Maintains all word positions.'''
temp_surface = pygame.Surface((self.width + delta_width,self.height + delta_height))
(self.width,self.height) = (self.width + delta_width, self.height + delta_height)
temp_surface.blit(self.cloud,(0,0))
self.cloud = temp_surface
def smart_cloud(self,input,max_text_size=72,min_text_size=12,exclude_words = True):
'''Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to true if you want to eliminate words that only occur once.'''
self.exclude_words = exclude_words
if isdir(input):
self.directory_cloud(input,max_text_size,min_text_size)
elif isfile(input):
text = read_file(input)
self.text_cloud(text,max_text_size,min_text_size)
elif isinstance(input, basestring):
self.text_cloud(input,max_text_size,min_text_size)
else:
print 'Input type not supported.'
print 'Supported types: String, Directory, .txt file'
def directory_cloud(self,directory,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using files from a directory.
The color of the words correspond to the amount of documents the word occurs in.'''
worddict = assign_fonts(tuplecount(read_dir(directory)),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
colordict = assign_colors(dir_freq(directory))
num_words = 0
for word in sorted_worddict:
self.render_word(word,worddict[word],colordict[word])
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
#the initial position is determined
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, create a bigger cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
num_words += 1
def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using plain text.'''
worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
for word in sorted_worddict:
self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255)))
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, expand the cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
def display(self):
'''Displays the word cloud to the screen.'''
pygame.init()
self.display = pygame.display.set_mode((self.width,self.height))
self.display.blit(self.cloud,(0,0))
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
def save(self,filename):
'''Saves the cloud to a file.'''
pygame.image.save(self.cloud,filename)
|
NickMonzillo/SmartCloud
|
SmartCloud/__init__.py
|
Cloud.collides
|
python
|
def collides(self,position,size):
'''Returns True if the word collides with another plotted word.'''
word_rect = pygame.Rect(position,self.word_size)
if word_rect.collidelistall(self.used_pos) == []:
return False
else:
return True
|
Returns True if the word collides with another plotted word.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L29-L35
| null |
class Cloud(object):
def __init__(self,width=500,height=500):
pygame.init()
pygame.font.init()
self.width = width
self.height = height
self.cloud = pygame.Surface((width,height))
self.used_pos = []
def render_word(self,word,size,color):
'''Creates a surface that contains a word.'''
pygame.font.init()
font = pygame.font.Font(None,size)
self.rendered_word = font.render(word,0,color)
self.word_size = font.size(word)
def plot_word(self,position):
'''Blits a rendered word on to the main display surface'''
posrectangle = pygame.Rect(position,self.word_size)
self.used_pos.append(posrectangle)
self.cloud.blit(self.rendered_word,position)
def expand(self,delta_width,delta_height):
'''Makes the cloud surface bigger. Maintains all word positions.'''
temp_surface = pygame.Surface((self.width + delta_width,self.height + delta_height))
(self.width,self.height) = (self.width + delta_width, self.height + delta_height)
temp_surface.blit(self.cloud,(0,0))
self.cloud = temp_surface
def smart_cloud(self,input,max_text_size=72,min_text_size=12,exclude_words = True):
'''Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to true if you want to eliminate words that only occur once.'''
self.exclude_words = exclude_words
if isdir(input):
self.directory_cloud(input,max_text_size,min_text_size)
elif isfile(input):
text = read_file(input)
self.text_cloud(text,max_text_size,min_text_size)
elif isinstance(input, basestring):
self.text_cloud(input,max_text_size,min_text_size)
else:
print 'Input type not supported.'
print 'Supported types: String, Directory, .txt file'
def directory_cloud(self,directory,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using files from a directory.
The color of the words correspond to the amount of documents the word occurs in.'''
worddict = assign_fonts(tuplecount(read_dir(directory)),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
colordict = assign_colors(dir_freq(directory))
num_words = 0
for word in sorted_worddict:
self.render_word(word,worddict[word],colordict[word])
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
#the initial position is determined
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, create a bigger cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
num_words += 1
def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using plain text.'''
worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
for word in sorted_worddict:
self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255)))
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, expand the cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
def display(self):
'''Displays the word cloud to the screen.'''
pygame.init()
self.display = pygame.display.set_mode((self.width,self.height))
self.display.blit(self.cloud,(0,0))
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
def save(self,filename):
'''Saves the cloud to a file.'''
pygame.image.save(self.cloud,filename)
|
NickMonzillo/SmartCloud
|
SmartCloud/__init__.py
|
Cloud.expand
|
python
|
def expand(self,delta_width,delta_height):
'''Makes the cloud surface bigger. Maintains all word positions.'''
temp_surface = pygame.Surface((self.width + delta_width,self.height + delta_height))
(self.width,self.height) = (self.width + delta_width, self.height + delta_height)
temp_surface.blit(self.cloud,(0,0))
self.cloud = temp_surface
|
Makes the cloud surface bigger. Maintains all word positions.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L37-L42
| null |
class Cloud(object):
def __init__(self,width=500,height=500):
pygame.init()
pygame.font.init()
self.width = width
self.height = height
self.cloud = pygame.Surface((width,height))
self.used_pos = []
def render_word(self,word,size,color):
'''Creates a surface that contains a word.'''
pygame.font.init()
font = pygame.font.Font(None,size)
self.rendered_word = font.render(word,0,color)
self.word_size = font.size(word)
def plot_word(self,position):
'''Blits a rendered word on to the main display surface'''
posrectangle = pygame.Rect(position,self.word_size)
self.used_pos.append(posrectangle)
self.cloud.blit(self.rendered_word,position)
def collides(self,position,size):
'''Returns True if the word collides with another plotted word.'''
word_rect = pygame.Rect(position,self.word_size)
if word_rect.collidelistall(self.used_pos) == []:
return False
else:
return True
def smart_cloud(self,input,max_text_size=72,min_text_size=12,exclude_words = True):
'''Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to true if you want to eliminate words that only occur once.'''
self.exclude_words = exclude_words
if isdir(input):
self.directory_cloud(input,max_text_size,min_text_size)
elif isfile(input):
text = read_file(input)
self.text_cloud(text,max_text_size,min_text_size)
elif isinstance(input, basestring):
self.text_cloud(input,max_text_size,min_text_size)
else:
print 'Input type not supported.'
print 'Supported types: String, Directory, .txt file'
def directory_cloud(self,directory,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using files from a directory.
The color of the words correspond to the amount of documents the word occurs in.'''
worddict = assign_fonts(tuplecount(read_dir(directory)),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
colordict = assign_colors(dir_freq(directory))
num_words = 0
for word in sorted_worddict:
self.render_word(word,worddict[word],colordict[word])
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
#the initial position is determined
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, create a bigger cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
num_words += 1
def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using plain text.'''
worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
for word in sorted_worddict:
self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255)))
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, expand the cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
def display(self):
'''Displays the word cloud to the screen.'''
pygame.init()
self.display = pygame.display.set_mode((self.width,self.height))
self.display.blit(self.cloud,(0,0))
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
def save(self,filename):
'''Saves the cloud to a file.'''
pygame.image.save(self.cloud,filename)
|
NickMonzillo/SmartCloud
|
SmartCloud/__init__.py
|
Cloud.smart_cloud
|
python
|
def smart_cloud(self,input,max_text_size=72,min_text_size=12,exclude_words = True):
'''Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to true if you want to eliminate words that only occur once.'''
self.exclude_words = exclude_words
if isdir(input):
self.directory_cloud(input,max_text_size,min_text_size)
elif isfile(input):
text = read_file(input)
self.text_cloud(text,max_text_size,min_text_size)
elif isinstance(input, basestring):
self.text_cloud(input,max_text_size,min_text_size)
else:
print 'Input type not supported.'
print 'Supported types: String, Directory, .txt file'
|
Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to true if you want to eliminate words that only occur once.
|
train
|
https://github.com/NickMonzillo/SmartCloud/blob/481d1ef428427b452a8a787999c1d4a8868a3824/SmartCloud/__init__.py#L44-L58
|
[
"def read_file(filename):\n '''Reads in a .txt file.'''\n with open(filename,'r') as f:\n content = f.read()\n return content\n",
"def directory_cloud(self,directory,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):\n '''Creates a word cloud using files from a directory.\n The color of the words correspond to the amount of documents the word occurs in.'''\n worddict = assign_fonts(tuplecount(read_dir(directory)),max_text_size,min_text_size,self.exclude_words)\n sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))\n colordict = assign_colors(dir_freq(directory))\n num_words = 0\n for word in sorted_worddict:\n self.render_word(word,worddict[word],colordict[word])\n if self.width < self.word_size[0]:\n #If the word is bigger than the surface, expand the surface.\n self.expand(self.word_size[0]-self.width,0)\n elif self.height < self.word_size[1]:\n self.expand(0,self.word_size[1]-self.height)\n position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]\n #the initial position is determined\n loopcount = 0\n while self.collides(position,self.word_size):\n if loopcount > max_count:\n #If it can't find a position for the word, create a bigger cloud.\n self.expand(expand_width,expand_height) \n loopcount = 0\n position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]\n loopcount += 1\n self.plot_word(position)\n num_words += 1\n",
"def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):\n '''Creates a word cloud using plain text.'''\n worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words)\n sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))\n for word in sorted_worddict:\n self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255)))\n if self.width < self.word_size[0]:\n #If the word is bigger than the surface, expand the surface.\n self.expand(self.word_size[0]-self.width,0)\n elif self.height < self.word_size[1]:\n self.expand(0,self.word_size[1]-self.height)\n position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]\n loopcount = 0\n while self.collides(position,self.word_size):\n if loopcount > max_count:\n #If it can't find a position for the word, expand the cloud.\n self.expand(expand_width,expand_height)\n loopcount = 0\n position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]\n loopcount += 1\n self.plot_word(position)\n"
] |
class Cloud(object):
def __init__(self,width=500,height=500):
pygame.init()
pygame.font.init()
self.width = width
self.height = height
self.cloud = pygame.Surface((width,height))
self.used_pos = []
def render_word(self,word,size,color):
'''Creates a surface that contains a word.'''
pygame.font.init()
font = pygame.font.Font(None,size)
self.rendered_word = font.render(word,0,color)
self.word_size = font.size(word)
def plot_word(self,position):
'''Blits a rendered word on to the main display surface'''
posrectangle = pygame.Rect(position,self.word_size)
self.used_pos.append(posrectangle)
self.cloud.blit(self.rendered_word,position)
def collides(self,position,size):
'''Returns True if the word collides with another plotted word.'''
word_rect = pygame.Rect(position,self.word_size)
if word_rect.collidelistall(self.used_pos) == []:
return False
else:
return True
def expand(self,delta_width,delta_height):
'''Makes the cloud surface bigger. Maintains all word positions.'''
temp_surface = pygame.Surface((self.width + delta_width,self.height + delta_height))
(self.width,self.height) = (self.width + delta_width, self.height + delta_height)
temp_surface.blit(self.cloud,(0,0))
self.cloud = temp_surface
def directory_cloud(self,directory,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using files from a directory.
The color of the words correspond to the amount of documents the word occurs in.'''
worddict = assign_fonts(tuplecount(read_dir(directory)),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
colordict = assign_colors(dir_freq(directory))
num_words = 0
for word in sorted_worddict:
self.render_word(word,worddict[word],colordict[word])
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
#the initial position is determined
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, create a bigger cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
num_words += 1
def text_cloud(self,text,max_text_size=72,min_text_size=12,expand_width=50,expand_height=50,max_count=100000):
'''Creates a word cloud using plain text.'''
worddict = assign_fonts(tuplecount(text),max_text_size,min_text_size,self.exclude_words)
sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
for word in sorted_worddict:
self.render_word(word,worddict[word],(randint(0,255),randint(0,255),randint(0,255)))
if self.width < self.word_size[0]:
#If the word is bigger than the surface, expand the surface.
self.expand(self.word_size[0]-self.width,0)
elif self.height < self.word_size[1]:
self.expand(0,self.word_size[1]-self.height)
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount = 0
while self.collides(position,self.word_size):
if loopcount > max_count:
#If it can't find a position for the word, expand the cloud.
self.expand(expand_width,expand_height)
loopcount = 0
position = [randint(0,self.width-self.word_size[0]),randint(0,self.height-self.word_size[1])]
loopcount += 1
self.plot_word(position)
def display(self):
'''Displays the word cloud to the screen.'''
pygame.init()
self.display = pygame.display.set_mode((self.width,self.height))
self.display.blit(self.cloud,(0,0))
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
def save(self,filename):
'''Saves the cloud to a file.'''
pygame.image.save(self.cloud,filename)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.